repo_name | path | copies | size | content | license
---|---|---|---|---|---|
aleksandar-mitrevski/ai | Simulated-Annealing-and-Constraint-Satisfaction/Simulated-Annealing/search.py | 1 | 7711 |
import random
import math
import matplotlib.pyplot as pyplot
import time
class CitySearchLibrary(object):
"""Defines a search library for finding minimum cost paths between cities.
Author: Aleksandar Mitrevski
"""
def __init__(self, cities):
"""Initializes a library.
Keyword arguments:
cities -- A list of 'CityData' objects.
"""
self.cities = cities
self.number_of_cities = len(self.cities)
def search(self, maximum_runtime_allowed):
"""Looks for a shortest cost cycle that connects all cities
(i.e. looks for a solution to the traveling salesman problem)
using simulated annealing. Returns:
- The minimum cost found.
- The number of moves (ascends and descends) made by the algorithm.
Keyword arguments:
maximum_runtime_allowed -- Maximum time (in seconds) allowed for the search.
"""
print 'Running simulated annealing...'
#we start with a random initial state
current_state = self._get_initial_state()
#we find the total cost of the initial configuration
#because we will need to use it for comparing with the costs
#of the neighbour states
cost = self._calculate_cost(current_state)
current_cost = cost
#used for visualization purposes
cost_at_each_move = [current_cost]
#used for counting the number of moves made by the algorithm
number_of_moves = 0
start_time = time.clock()
time_elapsed = 0.
#while time remains and the temperature is above the cutoff, the algorithm
#picks a random neighbour of the current state and decides whether to move to it
while time_elapsed < maximum_runtime_allowed:
temperature = self._schedule(number_of_moves)
if temperature < 1e-200:
break
#we generate a random neighbour of the current state
neighbour = self._generate_neighbour(current_state)
neighbour_cost = self._calculate_cost(neighbour)
delta_cost = neighbour_cost - current_cost
#if the neighbour improves the cost, we definitely make a move to it
if delta_cost < 0:
current_state = list(neighbour)
current_cost = neighbour_cost
#if the neighbour doesn't offer an improvement, we only select it with a certain probability
else:
change_state = self._change_state(delta_cost, temperature)
if change_state:
current_state = list(neighbour)
current_cost = neighbour_cost
cost_at_each_move.append(current_cost)
number_of_moves += 1
time_elapsed = time.clock() - start_time
#we visualize the results of the algorithm
self.visualize_iterations(number_of_moves, cost_at_each_move)
return current_cost, number_of_moves
def _get_initial_state(self):
"""Generates a random city configuration that contains each city
once. Assumes that the cities form a complete graph. Returns a list
of indices corresponding to cities in 'self.cities'.
"""
indices = [i for i in xrange(self.number_of_cities)]
city_arrangement = []
for i in xrange(self.number_of_cities):
city_index = random.randint(0, len(indices)-1)
city_arrangement.append(indices[city_index])
del indices[city_index]
city_arrangement.append(city_arrangement[0])
return city_arrangement
def _calculate_cost(self, city_arrangement):
"""Calculates costs between all cities in a given city configuration,
given the longitude and latitude coordinates of the cities.
Returns the total cost between the cities.
Keyword arguments:
city_arrangement -- A list of city indices.
"""
total_cost = 0.
number_of_cities = len(self.cities)
#we calculate the distance between two consecutive
#cities in the list; uses the Haversine formula for calculating distance
#(source http://www.movable-type.co.uk/scripts/latlong.html)
for i in xrange(number_of_cities):
current_city = self.cities[city_arrangement[i]]
neighbour = self.cities[city_arrangement[i+1]]
radius_of_earth = 6371.
longitude_distance = (current_city.longitude - neighbour.longitude) * math.pi / 180.
latitude_distance = (current_city.latitude - neighbour.latitude) * math.pi / 180.
current_city_latitude_radians = current_city.latitude * math.pi / 180.0
neighbour_latitude_radians = neighbour.latitude * math.pi / 180.0
a = math.sin(latitude_distance / 2.)**2 + math.sin(longitude_distance / 2.)**2 * math.cos(current_city_latitude_radians) * math.cos(neighbour_latitude_radians)
c = 2. * math.atan2(math.sqrt(a), math.sqrt(1.-a))
distance = radius_of_earth * c
total_cost += distance
return total_cost
def _schedule(self, iteration_counter):
"""Returns temperature as a function of 'iteration_counter'.
Keyword arguments:
iteration_counter -- Iteration counter of the simulated annealing algorithm.
"""
temperature = 1e10 * 0.999**iteration_counter
return temperature
def _generate_neighbour(self, city_arrangement):
"""Generates neighbour states for the given city arrangement
by choosing a random list position and swapping the element
in that position with all the other elements.
Once the neighbours have been generated, randomly picks one neighbour and returns it.
Keyword arguments:
city_arrangement -- A list of city indices.
"""
city_to_swap = random.randint(0, self.number_of_cities-1)
neighbours = []
for i in xrange(self.number_of_cities):
if i == city_to_swap:
continue
neighbour = list(city_arrangement)
temp = neighbour[i]
neighbour[i] = neighbour[city_to_swap]
neighbour[city_to_swap] = temp
if city_to_swap == 0:
#keep the duplicated closing city in sync with the new first city
neighbour[len(neighbour)-1] = neighbour[0]
neighbours.append(neighbour)
neighbour_index = random.randint(0, len(neighbours)-1)
return neighbours[neighbour_index]
def _change_state(self, delta_cost, temperature):
"""Returns 'True' if we want to accept the state change and 'False' otherwise.
The acceptance probability is exp(-delta_cost/temperature).
Keyword arguments:
delta_cost -- Difference between the cost of a neighbour state and the cost of the current state.
temperature -- A temperature that determines the acceptance probability.
"""
probability_of_changing = math.exp(-delta_cost/temperature)
random_number = random.random()
if random_number < probability_of_changing:
return True
return False
def visualize_iterations(self, number_of_moves, cost_at_each_move):
"""Visualizes the results of the simulated annealing algorithm.
Keyword arguments:
number_of_moves -- The number of moves in the simulated annealing algorithm.
cost_at_each_move -- A list containing costs at each move of the algorithm.
"""
moves = [x+1 for x in xrange(number_of_moves + 1)]
pyplot.xlabel('Number of moves')
pyplot.ylabel('Total cost')
pyplot.plot(moves, cost_at_each_move, 'b-')
pyplot.show()
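# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# CityData is defined elsewhere in this repository; here we only assume it exposes
# 'longitude' and 'latitude' attributes, so a namedtuple stands in for it. The
# coordinates and runtime below are arbitrary demo values.
def _demo_city_search(runtime_seconds=5):
    import collections
    CityData = collections.namedtuple('CityData', ['longitude', 'latitude'])
    cities = [CityData(21.43, 41.99),
              CityData(23.72, 37.98),
              CityData(19.04, 47.50),
              CityData(16.37, 48.21)]
    library = CitySearchLibrary(cities)
    cost, moves = library.search(runtime_seconds)
    return cost, moves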
| mit |
Spinmob/spinmob | _plotting_mess.py | 1 | 40481 |
import os as _os
import pylab as _pylab
import numpy as _n
import itertools as _itertools
import time as _time
import spinmob as _s
try: from . import _functions as _fun
except: import _functions as _fun
try: from . import _pylab_tweaks as _pt
except: import _pylab_tweaks as _pt
try: from . import _data as _data
except: import _data as _data
# expose all the eval statements to all the functions in numpy
from numpy import *
from scipy.special import *
# handle for the colormap so it doesn't immediately close
_colormap = None
def _get_standard_title():
"""
Gets the standard title.
"""
title = 'Plot created ' + _time.asctime()
# Try to add the last command to the title.
try:
command = list(get_ipython().history_manager.get_range())[-1][2]
# Get the path of the runfile()
if command[0:8] == 'runfile(':
def f(*a, **kw): return a[0]
command = eval('f'+command[7:], dict(f=f))
# Just the last line
command = command.replace('\n', '; ')
title = title + '\n' + command
except: pass
return title
def _draw():
""" Method for interactive drawing used by all plotters at the end."""
_pylab.ion()
_pylab.draw()
_pylab.show() # This command always raises the figure, unfortunately, and is needed to see it on the first plot.
def complex_data(data, edata=None, draw=True, **kwargs):
"""
Plots the imaginary vs real for complex data.
Parameters
----------
data
Array of complex data
edata=None
Array of complex error bars
draw=True
Draw the plot after it's assembled?
See spinmob.plot.xy.data() for additional optional keyword arguments.
"""
_pylab.ioff()
# generate the data the easy way
try:
rdata = _n.real(data)
idata = _n.imag(data)
if edata is None:
erdata = None
eidata = None
else:
erdata = _n.real(edata)
eidata = _n.imag(edata)
# generate the data the hard way.
except:
rdata = []
idata = []
if edata is None:
erdata = None
eidata = None
else:
erdata = []
eidata = []
for n in range(len(data)):
rdata.append(_n.real(data[n]))
idata.append(_n.imag(data[n]))
if not edata is None:
erdata.append(_n.real(edata[n]))
eidata.append(_n.imag(edata[n]))
if 'xlabel' not in kwargs: kwargs['xlabel'] = 'Real'
if 'ylabel' not in kwargs: kwargs['ylabel'] = 'Imaginary'
xy_data(rdata, idata, eidata, erdata, draw=False, **kwargs)
if draw: _draw()
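# --- Hedged usage sketch (added for illustration; not part of the original spinmob API) ---
# Plots a noisy unit circle in the complex plane with complex_data(); the array size
# and noise level are arbitrary demo values.
def _demo_complex_data():
    zs = _n.exp(1j * _n.linspace(0, 2 * _n.pi, 200))
    zs = zs + 0.05 * (_n.random.randn(200) + 1j * _n.random.randn(200))
    complex_data(zs, title='Noisy unit circle (demo)')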
def complex_databoxes(ds, script='d[1]+1j*d[2]', escript=None, **kwargs):
"""
Uses databoxes and specified script to generate data and send to
spinmob.plot.complex_data()
Parameters
----------
ds
List of databoxes
script='d[1]+1j*d[2]'
Complex-valued script for data array.
escript=None
Complex-valued script for error bars
See spinmob.plot.complex.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
"""
datas = []
labels = []
if escript is None: errors = None
else: errors = []
for d in ds:
datas.append(d(script))
labels.append(_os.path.split(d.path)[-1])
if not escript is None: errors.append(d(escript))
complex_data(datas, errors, label=labels, **kwargs)
if "draw" in kwargs and not kwargs["draw"]: return
_pylab.ion()
_pylab.draw()
_pylab.show()
return ds
def complex_files(script='d[1]+1j*d[2]', escript=None, paths=None, **kwargs):
"""
Loads files and plots complex data in the real-imaginary plane.
Parameters
----------
script='d[1]+1j*d[2]'
Complex-valued script for data array.
escript=None
Complex-valued script for error bars
paths=None
List of paths to open. None means use a dialog
See spinmob.plot.complex.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
"""
ds = _data.load_multiple(paths=paths)
if len(ds) == 0: return
if 'title' not in kwargs: kwargs['title'] = _os.path.split(ds[0].path)[0]
return complex_databoxes(ds, script=script, **kwargs)
def complex_function(f='1.0/(1+1j*x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
"""
Plots function(s) in the complex plane over the specified range.
Parameters
----------
f='1.0/(1+1j*x)'
Complex-valued function or list of functions to plot.
These can be string functions or single-argument python functions;
additional globals can be supplied by g.
xmin=-1, xmax=1, steps=200
Range over which to plot and how many points to plot
p='x'
If using strings for functions, p is the independent parameter name.
g=None
Optional dictionary of extra globals. Try g=globals()!
erange=False
Use exponential spacing of the x data?
See spinmob.plot.xy.data() for additional optional keyword arguments.
"""
kwargs2 = dict(xlabel='Real', ylabel='Imaginary')
kwargs2.update(kwargs)
function(f, xmin, xmax, steps, p, g, erange, plotter=xy_data, complex_plane=True, draw=True, **kwargs2)
def magphase_data(xdata, ydata, eydata=None, exdata=None, xscale='linear', mscale='linear', pscale='linear', mlabel='Magnitude', plabel='Phase', phase='degrees', figure='gcf', clear=1, draw=True, **kwargs):
"""
Plots the magnitude and phase of complex ydata vs xdata.
Parameters
----------
xdata
Real-valued x-axis data
ydata
Complex-valued y-axis data
eydata=None
Complex-valued y-error
exdata=None
Real-valued x-error
xscale='linear'
'log' or 'linear' scale of the x axis
mscale='linear'
'log' or 'linear' scale of the magnitude axis
pscale='linear'
'log' or 'linear' scale of the phase axis
mlabel='Magnitude'
y-axis label for magnitude plot
plabel='Phase'
y-axis label for phase plot
phase='degrees'
'degrees' or 'radians' for the phase axis
figure='gcf'
Plot on the specified figure instance or 'gcf' for current figure.
clear=1
Clear the figure?
draw=True
Draw the figure when complete?
See spinmob.plot.xy.data() for additional optional keyword arguments.
"""
_pylab.ioff()
# set up the figure and axes
if figure == 'gcf': f = _pylab.gcf()
if clear: f.clear()
axes1 = _pylab.subplot(211)
axes2 = _pylab.subplot(212,sharex=axes1)
# Make sure the dimensionality of the data sets matches
xdata, ydata = _fun._match_data_sets(xdata, ydata)
exdata = _fun._match_error_to_data_set(xdata, exdata)
eydata = _fun._match_error_to_data_set(ydata, eydata)
# convert to magnitude and phase
m = []
p = []
em = []
ep = []
# Note this is a loop over data sets, not points.
for l in range(len(ydata)):
m.append(_n.abs(ydata[l]))
p.append(_n.angle(ydata[l]))
# get the mag - phase errors
if eydata[l] is None:
em.append(None)
ep.append(None)
else:
er = _n.real(eydata[l])
ei = _n.imag(eydata[l])
em.append(0.5*((er+ei) + (er-ei)*_n.cos(p[l])) )
ep.append(0.5*((er+ei) - (er-ei)*_n.cos(p[l]))/m[l] )
# convert to degrees
if phase=='degrees':
p[-1] = p[-1]*180.0/_n.pi
if not ep[l] is None:
ep[l] = ep[l]*180.0/_n.pi
if phase=='degrees': plabel = plabel + " (degrees)"
else: plabel = plabel + " (radians)"
if 'xlabel' in kwargs: xlabel=kwargs.pop('xlabel')
else: xlabel=''
if 'ylabel' in kwargs: kwargs.pop('ylabel')
if 'autoformat' not in kwargs: kwargs['autoformat'] = True
autoformat = kwargs['autoformat']
kwargs['autoformat'] = False
kwargs['xlabel'] = ''
xy_data(xdata, m, em, exdata, ylabel=mlabel, axes=axes1, clear=0, xscale=xscale, yscale=mscale, draw=False, **kwargs)
kwargs['autoformat'] = autoformat
kwargs['xlabel'] = xlabel
xy_data(xdata, p, ep, exdata, ylabel=plabel, axes=axes2, clear=0, xscale=xscale, yscale=pscale, draw=False, **kwargs)
axes2.set_title('')
if draw: _draw()
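# --- Hedged usage sketch (added for illustration; not part of the original spinmob API) ---
# Shows magphase_data() on a single-pole response 1/(1 + i*x); the x range is an
# arbitrary demo choice.
def _demo_magphase_data():
    x = _n.linspace(0, 10, 300)
    y = 1.0 / (1.0 + 1j * x)
    magphase_data(x, y, xlabel='x')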
def magphase_databoxes(ds, xscript=0, yscript='d[1]+1j*d[2]', eyscript=None, exscript=None, g=None, **kwargs):
"""
Use databoxes and scripts to generate data and plot the complex magnitude
and phase versus xdata.
Parameters
----------
ds
List of databoxes
xscript=0
Script for x data
yscript='d[1]+1j*d[2]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.magphase.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
"""
databoxes(ds, xscript, yscript, eyscript, exscript, plotter=magphase_data, g=g, **kwargs)
def magphase_files(xscript=0, yscript='d[1]+1j*d[2]', eyscript=None, exscript=None, paths=None, g=None, **kwargs):
"""
This will load a bunch of data files, generate data based on the supplied
scripts, and then plot the ydata's magnitude and phase versus xdata.
Parameters
----------
xscript=0
Script for x data
yscript='d[1]+1j*d[2]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
paths=None
List of paths to open.
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.magphase.data() for additional optional arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
"""
return files(xscript, yscript, eyscript, exscript, plotter=magphase_databoxes, paths=paths, g=g, **kwargs)
def magphase_function(f='1.0/(1+1j*x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
"""
Plots function(s) magnitude and phase over the specified range.
Parameters
----------
f='1.0/(1+1j*x)'
Complex-valued function or list of functions to plot.
These can be string functions or single-argument python functions;
additional globals can be supplied by g.
xmin=-1, xmax=1, steps=200
Range over which to plot and how many points to plot
p='x'
If using strings for functions, p is the independent parameter name.
g=None
Optional dictionary of extra globals. Try g=globals()!
erange=False
Use exponential spacing of the x data?
See spinmob.plot.xy.data() for additional optional keyword arguments.
"""
function(f, xmin, xmax, steps, p, g, erange, plotter=magphase_data, **kwargs)
def realimag_data(xdata, ydata, eydata=None, exdata=None, xscale='linear', rscale='linear', iscale='linear', rlabel='Real', ilabel='Imaginary', figure='gcf', clear=1, draw=True, **kwargs):
"""
Plots the real and imaginary parts of complex ydata vs xdata.
Parameters
----------
xdata
Real-valued x-axis data
ydata
Complex-valued y-axis data
eydata=None
Complex-valued y-error
exdata=None
Real-valued x-error
xscale='linear'
'log' or 'linear' scale of the x axis
rscale='linear'
'log' or 'linear' scale of the real axis
iscale='linear'
'log' or 'linear' scale of the imaginary axis
rlabel='Real'
y-axis label for real-part plot
ilabel='Imaginary'
y-axis label for imaginary-part plot
figure='gcf'
Plot on the specified figure instance or 'gcf' for current figure.
clear=1
Clear the figure?
draw=True
Draw the figure when completed?
See spinmob.plot.xy.data() for additional optional keyword arguments.
"""
_pylab.ioff()
# Make sure the dimensionality of the data sets matches
xdata, ydata = _fun._match_data_sets(xdata, ydata)
exdata = _fun._match_error_to_data_set(xdata, exdata)
eydata = _fun._match_error_to_data_set(ydata, eydata)
# convert to real imag, and get error bars
rdata = []
idata = []
erdata = []
eidata = []
for l in range(len(ydata)):
rdata.append(_n.real(ydata[l]))
idata.append(_n.imag(ydata[l]))
if eydata[l] is None:
erdata.append(None)
eidata.append(None)
else:
erdata.append(_n.real(eydata[l]))
eidata.append(_n.imag(eydata[l]))
# set up the figure and axes
if figure == 'gcf': f = _pylab.gcf()
if clear: f.clear()
axes1 = _pylab.subplot(211)
axes2 = _pylab.subplot(212,sharex=axes1)
if 'xlabel' in kwargs : xlabel=kwargs.pop('xlabel')
else: xlabel=''
if 'ylabel' in kwargs : kwargs.pop('ylabel')
if 'tall' not in kwargs: kwargs['tall'] = False
if 'autoformat' not in kwargs: kwargs['autoformat'] = True
autoformat = kwargs['autoformat']
kwargs['autoformat'] = False
kwargs['xlabel'] = ''
xy_data(xdata, rdata, eydata=erdata, exdata=exdata, ylabel=rlabel, axes=axes1, clear=0, xscale=xscale, yscale=rscale, draw=False, **kwargs)
kwargs['autoformat'] = autoformat
kwargs['xlabel'] = xlabel
xy_data(xdata, idata, eydata=eidata, exdata=exdata, ylabel=ilabel, axes=axes2, clear=0, xscale=xscale, yscale=iscale, draw=False, **kwargs)
axes2.set_title('')
if draw: _draw()
def realimag_databoxes(ds, xscript=0, yscript="d[1]+1j*d[2]", eyscript=None, exscript=None, g=None, **kwargs):
"""
Use databoxes and scripts to generate data and plot the real and
imaginary ydata versus xdata.
Parameters
----------
ds
List of databoxes
xscript=0
Script for x data
yscript='d[1]+1j*d[2]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.realimag.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
"""
databoxes(ds, xscript, yscript, eyscript, exscript, plotter=realimag_data, g=g, **kwargs)
def realimag_files(xscript=0, yscript="d[1]+1j*d[2]", eyscript=None, exscript=None, paths=None, g=None, **kwargs):
"""
This will load a bunch of data files, generate data based on the supplied
scripts, and then plot the ydata's real and imaginary parts versus xdata.
Parameters
----------
xscript=0
Script for x data
yscript='d[1]+1j*d[2]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
paths=None
List of paths to open.
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.realimag.data() for additional optional arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
"""
return files(xscript, yscript, eyscript, exscript, plotter=realimag_databoxes, paths=paths, g=g, **kwargs)
def realimag_function(f='1.0/(1+1j*x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
"""
Plots function(s) real and imaginary parts over the specified range.
Parameters
----------
f='1.0/(1+1j*x)'
Complex-valued function or list of functions to plot.
These can be string functions or single-argument python functions;
additional globals can be supplied by g.
xmin=-1, xmax=1, steps=200
Range over which to plot and how many points to plot
p='x'
If using strings for functions, p is the independent parameter name.
g=None
Optional dictionary of extra globals. Try g=globals()!
erange=False
Use exponential spacing of the x data?
See spinmob.plot.xy.data() for additional optional keyword arguments.
"""
function(f, xmin, xmax, steps, p, g, erange, plotter=realimag_data, **kwargs)
def xy_data(xdata, ydata, eydata=None, exdata=None, label=None, xlabel='', ylabel='', \
title='', shell_history=0, xshift=0, yshift=0, xshift_every=1, yshift_every=1, \
coarsen=0, style=None, clear=True, axes=None, xscale='linear', yscale='linear', grid=False, \
legend='best', legend_max=20, autoformat=True, autoformat_window=True, tall=False, draw=True, **kwargs):
"""
Plots specified data.
Parameters
----------
xdata, ydata
Arrays (or arrays of arrays) of data to plot. You can also
specify None for one of these, which will result in a plot
versus the data point number (starting from zero). Or you can
specify a number (alone, generally not in a list) to set the
scale of the auto-generated data.
eydata=None, exdata=None
Arrays of x and y errorbar values
label=None
String or array of strings for the line labels
xlabel=''
Label for the x-axis
ylabel=''
Label for the y-axis
title=''
Title for the axes; set to None to have nothing.
shell_history=0
How many commands from the pyshell history to include with the title
xshift=0, yshift=0
Progressive shifts on the data, to make waterfall plots
xshift_every=1
Perform the progressive shift every 1 or n'th line. Set to 0 or False
to shift all the curves by the same amount.
yshift_every=1
Perform the progressive shift every 1 or n'th line. Set to 0 or False
to shift all the curves by the same amount.
style=None
style cycle object.
clear=True
If no axes are specified (see below), clear the figure, otherwise clear just the axes.
axes=None
Which matplotlib axes to use, or "gca" for the current axes
xscale='linear', yscale='linear'
'linear' or 'log' x and y axis scales.
grid=False
Should we draw a grid on the axes?
legend='best'
Where to place the legend (see pylab.legend() for options)
Set this to None to ignore the legend.
legend_max=20
Number of legend entries before it's truncated with '...'
autoformat=True
Should we format the figure for printing?
autoformat_window=True
Should we resize and reposition the window when autoformatting?
tall=False
Should the format be tall?
draw=True
Whether or not to draw the plot and raise the figure after plotting.
See matplotlib's errorbar() function for additional optional keyword arguments.
"""
_pylab.ioff()
# Make sure the dimensionality of the data sets matches
xdata, ydata = _fun._match_data_sets(xdata, ydata)
exdata = _fun._match_error_to_data_set(xdata, exdata)
eydata = _fun._match_error_to_data_set(ydata, eydata)
# check that the labels is a list of strings of the same length
if not _fun.is_iterable(label): label = [label]*len(xdata)
while len(label) < len(ydata): label.append(label[0])
# concatenate if necessary
if len(label) > legend_max:
label[legend_max-2] = '...'
for n in range(legend_max-1,len(label)-1): label[n] = "_nolegend_"
# clear the figure?
if clear and not axes: _pylab.gcf().clear() # axes cleared later
# setup axes
if axes=="gca" or not axes: axes = _pylab.gca()
# if we're clearing the axes
if clear: axes.clear()
# set the current axes
_pylab.axes(axes)
# now loop over the list of data in xdata and ydata
for n in range(0,len(xdata)):
# get the label
if label[n]=='_nolegend_': l = '_nolegend_'
elif len(xdata) > 1: l = str(n)+": "+str(label[n])
else: l = str(label[n])
# calculate the x an y progressive shifts
if xshift_every: dx = xshift*(n/xshift_every)
else: dx = xshift
if yshift_every: dy = yshift*(n/yshift_every)
else: dy = yshift
# if we're supposed to coarsen the data, do so.
x = _fun.coarsen_array(xdata[n], coarsen)
y = _fun.coarsen_array(ydata[n], coarsen)
ey = _fun.coarsen_array(eydata[n], coarsen, 'quadrature')
ex = _fun.coarsen_array(exdata[n], coarsen, 'quadrature')
# update the style
if not style is None: kwargs.update(next(style))
axes.errorbar(x+dx, y+dy, label=l, yerr=ey, xerr=ex, **kwargs)
_pylab.xscale(xscale)
_pylab.yscale(yscale)
if legend: axes.legend(loc=legend)
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
# for some arguments there should be no title.
if title in [None, False, 0]:
axes.set_title('')
# add the commands to the title
else:
title = str(title)
history = _fun.get_shell_history()
for n in range(0, min(shell_history, len(history))):
title = title + "\n" + history[n].split('\n')[0].strip()
title = title + '\n' + _get_standard_title()
axes.set_title(title)
if grid: _pylab.grid(True)
if autoformat:
_pt.format_figure(draw=False, modify_geometry=autoformat_window)
_pt.auto_zoom(axes=axes, draw=False)
# update the canvas
if draw: _draw()
return axes
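# --- Hedged usage sketch (added for illustration; not part of the original spinmob API) ---
# Plots two curves with constant y error bars using xy_data(); the data and error
# values are arbitrary demo choices.
def _demo_xy_data():
    x = _n.linspace(0, 10, 50)
    ey = [0.1 * _n.ones(len(x)), 0.1 * _n.ones(len(x))]
    xy_data([x, x], [_n.sin(x), _n.cos(x)], eydata=ey,
            label=['sin', 'cos'], xlabel='x', ylabel='y')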
def xy_databoxes(ds, xscript=0, yscript='d[1]', eyscript=None, exscript=None, g=None, **kwargs):
"""
Use databoxes and scripts to generate and plot ydata versus xdata.
Parameters
----------
ds
List of databoxes
xscript=0
Script for x data
yscript='d[1]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.xy.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
"""
databoxes(ds, xscript, yscript, eyscript, exscript, plotter=xy_data, g=g, **kwargs)
def xy_files(xscript=0, yscript='d[1]', eyscript=None, exscript=None, paths=None, g=None, **kwargs):
"""
This will load a bunch of data files, generate data based on the supplied
scripts, and then plot the ydata versus xdata.
Parameters
----------
xscript=0
Script for x data
yscript='d[1]'
Script for y data
eyscript=None
Script for y error
exscript=None
Script for x error
paths=None
List of paths to open.
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.xy.data() for additional optional arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
Common additional parameters
----------------------------
filters="*.*"
Set the file filters for the dialog.
"""
return files(xscript, yscript, eyscript, exscript, plotter=xy_databoxes, paths=paths, g=g, **kwargs)
def xy_function(f='sin(x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
"""
Plots function(s) over the specified range.
Parameters
----------
f='sin(x)'
Function or list of functions to plot.
These can be string functions or single-argument python functions;
additional globals can be supplied by g.
xmin=-1, xmax=1, steps=200
Range over which to plot and how many points to plot
p='x'
If using strings for functions, p is the independent parameter name.
g=None
Optional dictionary of extra globals. Try g=globals()!
erange=False
Use exponential spacing of the x data?
See spinmob.plot.xy.data() for additional optional keyword arguments.
"""
function(f, xmin, xmax, steps, p, g, erange, plotter=xy_data, **kwargs)
def databoxes(ds, xscript=0, yscript=1, eyscript=None, exscript=None, g=None, plotter=xy_data, transpose=False, **kwargs):
"""
Plots the listed databox objects with the specified scripts.
ds list of databoxes
xscript script for x data
yscript script for y data
eyscript script for y error
exscript script for x error
plotter function used to do the plotting
transpose applies databox.transpose() prior to plotting
g optional dictionary of globals for the supplied scripts
**kwargs are sent to plotter()
"""
if not _fun.is_iterable(ds): ds = [ds]
if 'xlabel' not in kwargs: kwargs['xlabel'] = str(xscript)
if 'ylabel' not in kwargs: kwargs['ylabel'] = str(yscript)
# First make sure everything is a list of scripts (or None's)
if not _fun.is_iterable(xscript): xscript = [xscript]
if not _fun.is_iterable(yscript): yscript = [yscript]
if not _fun.is_iterable(exscript): exscript = [exscript]
if not _fun.is_iterable(eyscript): eyscript = [eyscript]
# make sure exscript matches shape with xscript (and the same for y)
if len(exscript) < len(xscript):
for n in range(len(xscript)-1): exscript.append(exscript[0])
if len(eyscript) < len(yscript):
for n in range(len(yscript)-1): eyscript.append(eyscript[0])
# Make xscript and exscript match in shape with yscript and eyscript
if len(xscript) < len(yscript):
for n in range(len(yscript)-1):
xscript.append(xscript[0])
exscript.append(exscript[0])
# check for the reverse possibility
if len(yscript) < len(xscript):
for n in range(len(xscript)-1):
yscript.append(yscript[0])
eyscript.append(eyscript[0])
# now check for None's (counting scripts)
for n in range(len(xscript)):
if xscript[n] is None and yscript[n] is None:
print("Two None scripts? But... why?")
return
if xscript[n] is None:
if type(yscript[n])==str: xscript[n] = 'range(len('+yscript[n]+'))'
else: xscript[n] = 'range(len(c('+str(yscript[n])+')))'
if yscript[n] is None:
if type(xscript[n])==str: yscript[n] = 'range(len('+xscript[n]+'))'
else: yscript[n] = 'range(len(c('+str(xscript[n])+')))'
xdatas = []
ydatas = []
exdatas = []
eydatas = []
labels = []
# Loop over all the data boxes
for i in range(len(ds)):
# Reset the default globals
all_globals = dict(n=i,m=len(ds)-1-i)
# Update them with the user-specified globals
if not g==None: all_globals.update(g)
# For ease of coding
d = ds[i]
# Take the transpose if necessary
if transpose: d = d.transpose()
# Generate the x-data; returns a list of outputs, one for each xscript
xdata = d(xscript, all_globals)
# Loop over each xdata, appending to the master list, and generating a label
for n in range(len(xdata)):
xdatas.append(xdata[n])
# Normal data set is a 1d array
labels.append(_os.path.split(d.path)[-1])
# Special case: single-point per file
if _fun.is_a_number(xdata[0]) == 1 and len(ds) > 1:
labels = [_os.path.split(ds[0].path)[-1] + ' - ' + _os.path.split(ds[-1].path)[-1]]
# Append the other data sets to their master lists
for y in d( yscript, all_globals): ydatas.append(y)
for x in d(exscript, all_globals): exdatas.append(x)
for y in d(eyscript, all_globals): eydatas.append(y)
if "label" in kwargs: labels = kwargs.pop("label")
plotter(xdatas, ydatas, eydatas, exdatas, label=labels, **kwargs)
def files(xscript=0, yscript=1, eyscript=None, exscript=None, g=None, plotter=xy_databoxes, paths=None, **kwargs):
"""
This will load a bunch of data files, generate data based on the supplied
scripts, and then plot this data using the specified databox plotter.
xscript, yscript, eyscript, exscript scripts to generate x, y, and errors
g optional dictionary of globals
optional: filters="*.*" to set the file filters for the dialog.
**kwargs are sent to plotter()
"""
if 'delimiter' in kwargs: delimiter = kwargs.pop('delimiter')
else: delimiter = None
if 'filters' in kwargs: filters = kwargs.pop('filters')
else: filters = '*.*'
ds = _data.load_multiple(paths=paths, delimiter=delimiter, filters=filters)
if ds is None or len(ds) == 0: return
# generate a default title (the directory)
if 'title' not in kwargs: kwargs['title']=_os.path.split(ds[0].path)[0]
# run the databox plotter
plotter(ds, xscript=xscript, yscript=yscript, eyscript=eyscript, exscript=exscript, g=g, **kwargs)
return ds
def function(f='sin(x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, plotter=xy_data, complex_plane=False, **kwargs):
"""
Plots the function over the specified range
f function or list of functions to plot; can be string functions
xmin, xmax, steps range over which to plot, and how many points to plot
p if using strings for functions, p is the parameter name
g optional dictionary of extra globals. Try g=globals()
erange Use exponential spacing of the x data?
plotter function used to plot the generated data
complex_plane plot imag versus real of f?
**kwargs are sent to spinmob.plot.real_imag.data()
"""
if not g: g = {}
# do the opposite kind of update()
for k in list(globals().keys()):
if k not in g: g[k] = globals()[k]
# if the x-axis is a log scale, use erange
steps = int(steps)
if erange: x = _fun.erange(xmin, xmax, steps)
else: x = _n.linspace(xmin, xmax, steps)
# make sure it's a list so we can loop over it
if not type(f) in [type([]), type(())]: f = [f]
# loop over the list of functions
xdatas = []
ydatas = []
labels = []
for fs in f:
if type(fs) == str:
a = eval('lambda ' + p + ': ' + fs, g)
a.__name__ = fs
else:
a = fs
# try directly evaluating
try: y = a(x)
# do it the slow way.
except:
y = []
for z in x: y.append(a(z))
xdatas.append(x)
ydatas.append(y)
labels.append(a.__name__)
if 'xlabel' not in kwargs: kwargs['xlabel'] = p
if 'label' not in kwargs: kwargs['label'] = labels
# plot!
if complex_plane: plotter(_n.real(ydatas),_n.imag(ydatas), **kwargs)
else: plotter(xdatas, ydatas, **kwargs)
def image_data(Z, X=[0,1.0], Y=[0,1.0], aspect=1.0, zmin=None, zmax=None, clear=1, clabel='z', autoformat=True, colormap="Last Used", shell_history=0, **kwargs):
"""
Generates an image plot.
Parameters
----------
Z
2-d array of z-values
X=[0,1.0], Y=[0,1.0]
1-d array of x-values (only the first and last element are used)
See matplotlib's imshow() for additional optional arguments.
"""
global _colormap
# Set interpolation to something more relevant for everyday science
if not 'interpolation' in kwargs.keys(): kwargs['interpolation'] = 'nearest'
_pylab.ioff()
fig = _pylab.gcf()
if clear:
fig.clear()
_pylab.axes()
# generate the 3d axes
X = _n.array(X)
Y = _n.array(Y)
Z = _n.array(Z)
# assume X and Y are the bin centers and figure out the bin widths
x_width = abs(float(X[-1] - X[0])/(len(Z[0])-1))
y_width = abs(float(Y[-1] - Y[0])/(len(Z)-1))
# reverse the Z's
# Transpose and reverse
Z = Z.transpose()
Z = Z[-1::-1]
# get rid of the label and title kwargs
xlabel=''
ylabel=''
title =''
if 'xlabel' in kwargs: xlabel = kwargs.pop('xlabel')
if 'ylabel' in kwargs: ylabel = kwargs.pop('ylabel')
if 'title' in kwargs: title = kwargs.pop('title')
_pylab.imshow(Z, extent=[X[0]-x_width/2.0, X[-1]+x_width/2.0,
Y[0]-y_width/2.0, Y[-1]+y_width/2.0], **kwargs)
cb = _pylab.colorbar()
_pt.image_set_clim(zmin,zmax)
_pt.image_set_aspect(aspect)
cb.set_label(clabel)
a = _pylab.gca()
a.set_xlabel(xlabel)
a.set_ylabel(ylabel)
#_pt.close_sliders()
#_pt.image_sliders()
# title
history = _fun.get_shell_history()
for n in range(0, min(shell_history, len(history))):
title = title + "\n" + history[n].split('\n')[0].strip()
title = title + '\n' + _get_standard_title()
a.set_title(title.strip())
if autoformat: _pt.image_format_figure(fig)
_draw()
# add the color sliders
if colormap:
if _colormap: _colormap.close()
_colormap = _pt.image_colormap(colormap, image=a.images[0])
def image_function(f='sin(5*x)*cos(5*y)', xmin=-1, xmax=1, ymin=-1, ymax=1, xsteps=100, ysteps=100, p='x,y', g=None, **kwargs):
"""
Plots a 2-d function over the specified range
Parameters
----------
f='sin(5*x)*cos(5*y)'
Takes two inputs and returns one value. Can also
be a string function such as sin(x*y)
xmin=-1, xmax=1, ymin=-1, ymax=1
Range over which to generate/plot the data
xsteps=100, ysteps=100
How many points to plot on the specified range
p='x,y'
If using strings for functions, this is a string of parameters.
g=None
Optional additional globals. Try g=globals()!
See spinmob.plot.image.data() for additional optional keyword arguments.
"""
default_kwargs = dict(clabel=str(f), xlabel='x', ylabel='y')
default_kwargs.update(kwargs)
# aggregate globals
if not g: g = {}
for k in list(globals().keys()):
if k not in g: g[k] = globals()[k]
if type(f) == str:
f = eval('lambda ' + p + ': ' + f, g)
# generate the grid x and y coordinates
xones = _n.linspace(1,1,ysteps)
x = _n.linspace(xmin, xmax, xsteps)
xgrid = _n.outer(xones, x)
yones = _n.linspace(1,1,xsteps)
y = _n.linspace(ymin, ymax, ysteps)
ygrid = _n.outer(y, yones)
# now get the z-grid
try:
# try it the fast numpy way. Add 0 to assure dimensions
zgrid = f(xgrid, ygrid) + xgrid*0.0
except:
print("Notice: function is not rocking hardcore. Generating grid the slow way...")
# manually loop over the data to generate the z-grid
zgrid = []
for ny in range(0, len(y)):
zgrid.append([])
for nx in range(0, len(x)):
zgrid[ny].append(f(x[nx], y[ny]))
zgrid = _n.array(zgrid)
# now plot!
image_data(zgrid.transpose(), x, y, **default_kwargs)
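# --- Hedged usage sketch (added for illustration; not part of the original spinmob API) ---
# Renders the default 2-d string function on a coarser, wider grid; all ranges and
# step counts are arbitrary demo values.
def _demo_image_function():
    image_function('sin(5*x)*cos(5*y)', xmin=-2, xmax=2, ymin=-2, ymax=2,
                   xsteps=50, ysteps=50)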
def image_file(path=None, zscript='self[1:]', xscript='[0,1]', yscript='d[0]', g=None, **kwargs):
"""
Loads a data file and plots it with color. Data file must have columns of the
same length!
Parameters
----------
path=None
Path to data file.
zscript='self[1:]'
Determines how to get data from the columns
xscript='[0,1]', yscript='d[0]'
Determine the x and y arrays used for setting the axes bounds
g=None
Optional dictionary of globals for the scripts
See spinmob.plot.image.data() for additional optional keyword arguments.
See spinmob.data.databox.execute_script() for more information about scripts.
"""
if 'delimiter' in kwargs: delimiter = kwargs.pop('delimiter')
else: delimiter = None
d = _data.load(paths=path, delimiter = delimiter)
if d is None or len(d) == 0: return
# allows the user to overwrite the defaults
default_kwargs = dict(xlabel = str(xscript),
ylabel = str(yscript),
title = d.path,
clabel = str(zscript))
default_kwargs.update(kwargs)
# get the data
X = d(xscript, g)
Y = d(yscript, g)
Z = _n.array(d(zscript, g))
# Z = Z.transpose()
# plot!
image_data(Z, X, Y, **default_kwargs)
def parametric_function(fx='sin(t)', fy='cos(t)', tmin=-1, tmax=1, steps=200, p='t', g=None, erange=False, **kwargs):
"""
Plots the parametric function over the specified range
Parameters
----------
fx='sin(t)', fy='cos(t)'
Functions or (matching) lists of functions to plot;
can be string functions or python functions taking one argument
tmin=-1, tmax=1, steps=200
Range over which to plot, and how many points to plot
p='t'
If using strings for functions, p is the parameter name
g=None
Optional dictionary of extra globals. Try g=globals()!
erange=False
Use exponential spacing of the t data?
See spinmob.plot.xy.data() for additional optional keyword arguments.
"""
if not g: g = {}
for k in list(globals().keys()):
if k not in g: g[k] = globals()[k]
# if the x-axis is a log scale, use erange
if erange: r = _fun.erange(tmin, tmax, steps)
else: r = _n.linspace(tmin, tmax, steps)
# make sure it's a list so we can loop over it
if not type(fy) in [type([]), type(())]: fy = [fy]
if not type(fx) in [type([]), type(())]: fx = [fx]
# loop over the list of functions
xdatas = []
ydatas = []
labels = []
for fs in fx:
if type(fs) == str:
a = eval('lambda ' + p + ': ' + fs, g)
a.__name__ = fs
else:
a = fs
x = []
for z in r: x.append(a(z))
xdatas.append(x)
labels.append(a.__name__)
for n in range(len(fy)):
fs = fy[n]
if type(fs) == str:
a = eval('lambda ' + p + ': ' + fs, g)
a.__name__ = fs
else:
a = fs
y = []
for z in r: y.append(a(z))
ydatas.append(y)
labels[n] = labels[n]+', '+a.__name__
# plot!
xy_data(xdatas, ydatas, label=labels, **kwargs)
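# --- Hedged usage sketch (added for illustration; not part of the original spinmob API) ---
# Draws a 3:2 Lissajous figure with parametric_function(); the functions and the
# parameter range are arbitrary demo choices.
def _demo_parametric_function():
    parametric_function('cos(3*t)', 'sin(2*t)', tmin=0, tmax=2 * _n.pi,
                        steps=500, p='t')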
class plot_style_cycle(dict):
iterators = {}
def __init__(self, **kwargs):
"""
Supply keyword arguments that would be sent to pylab.plot(), except
as a list so there is some order to follow. For example:
style = plot_style_cycle(color=['k','r','b'], marker='o')
"""
self.iterators = {}
# make sure everything is iterable
for key in kwargs:
if not getattr(kwargs[key],'__iter__',False): kwargs[key] = [kwargs[key]]
# The base class is a dictionary, so update our own elements!
self.update(kwargs)
# create the auxiliary iterator dictionary
self.reset()
def __repr__(self): return '{style}'
def __next__(self):
"""
Returns the next dictionary of styles to send to plot as kwargs.
For example:
pylab.plot([1,2,3],[1,2,1], **style.next())
"""
s = {}
for key in list(self.iterators.keys()): s[key] = next(self.iterators[key])
return s
def reset(self):
"""
Resets the style cycle.
"""
for key in list(self.keys()): self.iterators[key] = _itertools.cycle(self[key])
return self
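# --- Hedged usage sketch (added for illustration; not part of the original spinmob API) ---
# Cycles through three colors while keeping the marker fixed, as in the class
# docstring; note the builtin next() is used to pull successive style dictionaries.
def _demo_plot_style_cycle():
    style = plot_style_cycle(color=['k', 'r', 'b'], marker='o')
    return [next(style) for n in range(4)]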
if __name__ == '__main__':
    xy_files(0, 'd[1]*2**n', yscale='log')
| gpl-3.0 |
iABC2XYZ/abc | BPM/Paper/DealData.py | 1 | 1583 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Peiyong Jiang
Author: Jiang Peiyong
[email protected]
Description of this file:
"""
import numpy as np
import matplotlib.pyplot as plt
nameFolder='/home/e/ABC/abc/BPM/Paper/'
nameData=nameFolder+'Rec_1106_2046.dat'
exData=np.loadtxt(nameData)
'''
for i in range(24):
plt.figure(1)
plt.clf()
plt.plot(exData[:,i],'.')
plt.title(i)
plt.pause(1)
'''
'''
for i in range(10,24):
plt.figure(1)
plt.clf()
plt.hist(exData[:,i],1000)
plt.title(i)
plt.pause(1)
'''
exDataMean=[]
for i in range(14,24):
exDataMeanTmp= np.mean(exData[:,i])
exDataMean.append(exDataMeanTmp)
print "+"
print str(np.round(np.array(exDataMean)*100.)/100.).replace('  ',' ').replace('  ',' ').replace('  ',' ').replace('  ',' ').replace(' ',',')
print ('\n')
exDataMean[0]=0.00
exDataMean[7]=0.00
print "-"
print str(np.round(-np.array(exDataMean)*100.)/100.).replace('  ',' ').replace('  ',' ').replace('  ',' ').replace('  ',' ').replace(' ',',')
##
exDataMean=[]
for i in range(14,24):
exDataMeanTmp= np.mean(exData[:,i])
exDataMean.append(exDataMeanTmp)
bpms=exData[:,14:24]
for i in range(10):
bpms[:,i]-=exDataMean[i]
rBPM=[]
for i in range(np.shape(bpms)[0]):
rBPM.append(np.sum(np.square(bpms[i,:])))
idMinR=np.argmin(rBPM)
print idMinR,rBPM[idMinR]
print '---- I -------'
print (exData[idMinR,14:24])
print exDataMean
print np.sum(np.square(exData[idMinR+1,14:24]-exDataMean))
#plt.plot(rBPM)
#plt.show()
#print np.where(np.min(rBPM)==rBPM)
| gpl-3.0 |
DSLituiev/scikit-learn | doc/datasets/mldata_fixture.py | 367 | 1183 |
"""Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| bsd-3-clause |
sinpantuflas/aubio | python/demos/demo_sink_create_woodblock.py | 10 | 1580 |
#! /usr/bin/env python
import sys
from math import pi, e
from aubio import sink
from numpy import arange, resize, sin, exp, zeros
if len(sys.argv) < 2:
print 'usage: %s <outputfile> [samplerate]' % sys.argv[0]
sys.exit(1)
samplerate = 44100 # samplerate in Hz
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
pitch = 2200 # in Hz
blocksize = 256 # in samples
duration = 0.02 # in seconds
twopi = pi * 2.
duration = int ( samplerate * duration ) # convert to samples
attack = int (samplerate * .001 )
decay = .5
period = float(samplerate) / pitch
# create a sine lookup table
tablelen = 1000
sinetable = arange(tablelen + 1, dtype = 'float32')
sinetable = 0.7 * sin(twopi * sinetable/tablelen)
sinetone = zeros((duration,), dtype = 'float32')
# compute sinetone at floating point period
for i in range(duration):
x = int((i % period) / float(period) * tablelen)
idx = int(x)
frac = x - idx
a = sinetable[idx]
b = sinetable[idx + 1]
sinetone[i] = a + frac * (b -a)
# apply some envelope
float_ramp = arange(duration, dtype = 'float32')
sinetone *= exp( - e * float_ramp / duration / decay)
sinetone[:attack] *= exp( e * ( float_ramp[:attack] / attack - 1 ) )
if 1:
import matplotlib.pyplot as plt
plt.plot(sinetone)
plt.show()
my_sink = sink(sys.argv[1], samplerate)
total_frames = 0
while total_frames + blocksize < duration:
my_sink(sinetone[total_frames:total_frames+blocksize], blocksize)
total_frames += blocksize
my_sink(sinetone[total_frames:duration], duration - total_frames)
| gpl-3.0 |
zonemercy/Kaggle | quora/solution/utils/keras_utils.py | 2 | 3685 |
# -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <[email protected]>
@brief: utils for Keras models
"""
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential
from keras.layers.core import Dense, Layer, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import ELU, PReLU
from keras.optimizers import SGD
from keras.utils import np_utils, generic_utils
class KerasDNNRegressor:
def __init__(self, input_dropout=0.2, hidden_layers=2, hidden_units=64,
hidden_activation="relu", hidden_dropout=0.5, batch_norm=None,
optimizer="adadelta", nb_epoch=10, batch_size=64):
self.input_dropout = input_dropout
self.hidden_layers = hidden_layers
self.hidden_units = hidden_units
self.hidden_activation = hidden_activation
self.hidden_dropout = hidden_dropout
self.batch_norm = batch_norm
self.optimizer = optimizer
self.nb_epoch = nb_epoch
self.batch_size = batch_size
self.scaler = None
self.model = None
def __str__(self):
return self.__repr__()
def __repr__(self):
return ("%s(input_dropout=%f, hidden_layers=%d, hidden_units=%d, \n"
"hidden_activation=\'%s\', hidden_dropout=%f, batch_norm=\'%s\', \n"
"optimizer=\'%s\', nb_epoch=%d, batch_size=%d)" % (
self.__class__.__name__,
self.input_dropout,
self.hidden_layers,
self.hidden_units,
self.hidden_activation,
self.hidden_dropout,
str(self.batch_norm),
self.optimizer,
self.nb_epoch,
self.batch_size,
))
def fit(self, X, y):
## scaler
self.scaler = StandardScaler()
X = self.scaler.fit_transform(X)
#### build model
self.model = Sequential()
## input layer
self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1],)))
## hidden layers
first = True
hidden_layers = self.hidden_layers
while hidden_layers > 0:
self.model.add(Dense(self.hidden_units))
if self.batch_norm == "before_act":
self.model.add(BatchNormalization())
if self.hidden_activation == "prelu":
self.model.add(PReLU())
elif self.hidden_activation == "elu":
self.model.add(ELU())
else:
self.model.add(Activation(self.hidden_activation))
if self.batch_norm == "after_act":
self.model.add(BatchNormalization())
self.model.add(Dropout(self.hidden_dropout))
hidden_layers -= 1
## output layer
output_dim = 1
output_act = "linear"
self.model.add(Dense(output_dim))
self.model.add(Activation(output_act))
## loss
if self.optimizer == "sgd":
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
self.model.compile(loss="mse", optimizer=sgd)
else:
self.model.compile(loss="mse", optimizer=self.optimizer)
## fit
self.model.fit(X, y,
nb_epoch=self.nb_epoch,
batch_size=self.batch_size,
validation_split=0, verbose=0)
return self
def predict(self, X):
X = self.scaler.transform(X)
y_pred = self.model.predict(X)
y_pred = y_pred.flatten()
return y_pred
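# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Fits the regressor on random data; the shapes and hyperparameters are arbitrary
# demo values and a working Keras backend is assumed.
def _demo_keras_dnn_regressor():
    import numpy as np
    X = np.random.rand(200, 10)
    y = np.random.rand(200)
    reg = KerasDNNRegressor(hidden_layers=1, hidden_units=32, nb_epoch=2)
    reg.fit(X, y)
    return reg.predict(X)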
| mit |
hurricup/intellij-community | python/testData/debug/test_dataframe.py | 23 | 1309 |
import pandas as pd
import numpy as np
df1 = pd.DataFrame({'row': [0, 1, 2],
'One_X': [1.1, 1.1, 1.1],
'One_Y': [1.2, 1.2, 1.2],
'Two_X': [1.11, 1.11, 1.11],
'Two_Y': [1.22, 1.22, 1.22]})
print(df1) ###line 8
df2 = pd.DataFrame({'row': [0, 1, 2],
'One_X': [1.1, 1.1, 1.1],
'One_Y': [1.2, 1.2, 1.2],
'Two_X': [1.11, 1.11, 1.11],
'Two_Y': [1.22, 1.22, 1.22],
'LABELS': ['A', 'B', 'C']})
print(df2) ##line 16
df3 = pd.DataFrame(data={'Province' : ['ON','QC','BC','AL','AL','MN','ON'],
'City' : ['Toronto','Montreal','Vancouver','Calgary','Edmonton','Winnipeg','Windsor'],
'Sales' : [13,6,16,8,4,3,1]})
table = pd.pivot_table(df3,values=['Sales'],index=['Province'],columns=['City'],aggfunc=np.sum,margins=True)
table.stack('City')
print(df3)
df4 = pd.DataFrame({'row': np.random.random(10000),
'One_X': np.random.random(10000),
'One_Y': np.random.random(10000),
'Two_X': np.random.random(10000),
'Two_Y': np.random.random(10000),
'LABELS': ['A'] * 10000})
print(df4) ##line 31
| apache-2.0 |
songjq/polystring | scripts/render.py | 1 | 6741 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
render
======
A structure data renderer.
Copyright (C) 2012 Yi-Xin Liu
"""
import argparse
import numpy as np
import scipy.io
from mayavi import mlab
#import matplotlib
#if(not args.display):
#matplotlib.use('Agg')
#mlab.options.offscreen = True #Error in running
import matplotlib.pyplot as plt
from matplotlib import colors
def render_1d(struct_name, data_file, img_file, period, **kwargs):
''' render 1D structure.
:param struct_name: the struct variable name to be rendered
:type struct_name: string
:param data_file: the MAT file containing sturct varible
:type data_file: string
:param img_file: the file name of the image file
:type img_file: string
:param period: how many periods to draw
:type period: integer
:param show_img: if True, show image on the screen
:type show_img: bool
:param kwargs: any extra key words arguments will be passed to plot functions
'''
data = scipy.io.loadmat(data_file)
struct = data[struct_name]
struct = np.tile(struct,period)
x = data['x']
Lx = np.size(x)
xa = x[0]
dx = x[1] - xa
rx = np.zeros(Lx*period)
for i in xrange(Lx*period):
rx[i] = i * dx
# No frame, white background
fig = plt.figure(dpi=80, facecolor='w')
# full figure subplot
ax = fig.add_axes([0, 0, 1, 1])
ax.plot(rx,struct,**kwargs)
plt.savefig(img_file)
def render_2d(struct_name, data_file, img_file, period,
levels=None, cmap=None,
**kwargs):
''' Render 2D structure.
:param struct_name: the struct variable name to be rendered
:type struct_name: string
:param data_file: the MAT file containing sturct varible
:type data_file: string
:param img_file: the file name of the image file
:type img_file: string
:param period: how many periods to draw
:type period: integer
:param show_img: if True, show image on the screen
:type show_img: bool
:param levels: how many contour levels
:type levels: integer
:param cmap: colormap for contour plot
:type cmap: `Colormap`
:param kwargs: any extra key words arguments will be passed to plot functions
'''
data = scipy.io.loadmat(data_file)
struct = data[struct_name]
repeat = (period,period)
struct = np.tile(struct,repeat)
x = data['x']
y = data['y']
Lx, Ly = np.shape(x)
xa = x[0,0]
dx1 = x[1,0] - xa
dx2 = x[0,1] - xa
yc = y[0,0]
dy1 = y[1,0] - yc
dy2 = y[0,1] - yc
rx = np.zeros((Lx*period,Ly*period))
ry = np.zeros((Lx*period,Ly*period))
for (i,j) in np.ndindex(Lx*period,Ly*period):
rx[i,j] = i * dx1 + j * dx2
ry[i,j] = i * dy1 + j * dy2
dx = rx.max() - rx.min()
dy = ry.max() - ry.min()
w, h = plt.figaspect(float(dy / dx)) # float is must
# No frame, white background, w/h aspect ratio figure
fig = plt.figure(figsize=(w, h), frameon=False,
dpi=80, facecolor='w')
# full figure subplot, no border, no axes
ax = fig.add_axes([0, 0, 1, 1], frameon=False, axisbg='w')
# no ticks
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# Default: there are 256 contour levels
if levels is None:
step = (struct.max() - struct.min()) / 256
levels = np.arange(struct.min(), struct.max() + step, step)
# Default: colormap is monochromatic red
if cmap is None:
clr = np.zeros((256, 3))
for i in np.arange(256):
clr[i, 0] = i / 255.0
cmap = colors.ListedColormap(clr)
# actual plot
ax.contourf(rx, ry, struct, levels=levels,
cmap=cmap, antialiased=False, **kwargs)
#ax.contourf(rx,ry,struct)
plt.savefig(img_file)
def render_3d(struct_name, data_file, img_file, period, **kwargs):
''' Render 3D structure.
:param struct_name: the struct variable name to be rendered
:type struct_name: string
:param data_file: the MAT file containing sturct varible
:type data_file: string
:param img_file: the file name of the image file
:type img_file: string
:param period: how many periods to draw
:type period: integer
:param show_img: if True, show image on the screen
:type show_img: bool
:param kwargs: any extra key words arguments will be passed to plot functions
'''
data = scipy.io.loadmat(data_file)
struct = data[struct_name]
repeat = (period,period,period)
struct = np.tile(struct,repeat)
x = data['x']
y = data['y']
z = data['z']
Lx, Ly, Lz = np.shape(x)
xa = x[0,0,0]
dx1 = x[1,0,0] - xa
dx2 = x[0,1,0] - xa
dx3 = x[0,0,1] - xa
yc = y[0,0,0]
dy1 = y[1,0,0] - yc
dy2 = y[0,1,0] - yc
dy3 = y[0,0,1] - yc
ze = z[0,0,0]
dz1 = z[1,0,0] - ze
dz2 = z[0,1,0] - ze
dz3 = z[0,0,1] - ze
rx = np.zeros((Lx*period,Ly*period,Lz*period))
ry = np.zeros((Lx*period,Ly*period,Lz*period))
rz = np.zeros((Lx*period,Ly*period,Lz*period))
for (i,j,k) in np.ndindex(Lx*period,Ly*period,Lz*period):
rx[i,j,k] = i * dx1 + j * dx2 + k * dx3
ry[i,j,k] = i * dy1 + j * dy2 + k * dy3
rz[i,j,k] = i * dz1 + j * dz2 + k * dz3
mlab.contour3d(rx,ry,rz,struct,**kwargs)
#mlab.pipeline.volume(mlab.pipeline.scalar_field(rx, ry, rz, struct))
mlab.savefig(img_file)
mlab.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--space',
default='2',
help='the space dimensionality.')
parser.add_argument('-s', '--struct',
default='struct',
help='the variable name in data file to render.')
parser.add_argument('-d', '--data', default='data.mat',
help='the data file to read.')
parser.add_argument('-i', '--image', default='struct.png',
help='the image file to write.')
parser.add_argument('-p', '--period', default=2, type=int,
help='how many periods to render.' + \
'same for each dimension.')
parser.add_argument('-y', '--display', action='store_true',
help='dispaly the image')
args = parser.parse_args()
struct = args.struct
data_file = args.data
img_file = args.image
period = args.period
if(args.space == '1'):
render_1d(struct, data_file, img_file, period)
elif(args.space == '2'):
render_2d(struct, data_file, img_file, period)
else:
render_3d(struct, data_file, img_file, period)
| gpl-3.0 |
yutiansut/QUANTAXIS | QUANTAXIS/QAAnalysis/QAAnalysis_signal.py | 2 | 22236 |
# coding:utf-8
# Author: 阿财(Rgveda@github)([email protected])
# Created date: 2020-02-27
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2021 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
import numba as nb
import scipy.signal as signal
from scipy.signal import lfilter, lfilter_zi, filtfilt, butter, savgol_filter
from QUANTAXIS.QAIndicator.base import *
from QUANTAXIS.QAData.base_datastruct import *
try:
import peakutils
except:
#print('PLEASE run "pip install peakutils" to call these modules')
pass
try:
from QUANTAXIS.QAIndicator.talib_numpy import *
import QUANTAXIS as QA
from QUANTAXIS.QAIndicator.base import *
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_timestamp_to_str,
QA_util_datetime_to_Unix_timestamp,
QA_util_print_timestamp
)
except:
print('PLEASE run "pip install QUANTAXIS" before call QUANTAXIS.QAAnalysis.QAAnalysis_signal modules')
pass
"""
Time-series signal processing: shared helper functions
"""
def time_series_momemtum(price, n=24, rf=0.02):
"""
    Time-series momentum indicator
Time Series Momentum strategy
"""
return (price / price.shift(n) - 1) - rf
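# A small usage sketch (added for illustration): 'price' is assumed to be a pandas
# Series indexed by time; n is the look-back in bars and rf a flat risk-free rate.
def _demo_time_series_momemtum():
    price = pd.Series(np.linspace(100., 120., 48))
    return time_series_momemtum(price, n=24, rf=0.02)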
def find_peak_vextors_eagerly(price, offest=0):
"""
    (Eager variant) Finds additional extreme points while the MACD is on an up-slope.
"""
xn = price
# pass 0
window_size, poly_order = 5, 1
yy_sg = savgol_filter(xn, window_size, poly_order)
# pass 1
x_tp_min, x_tp_max = signal.argrelextrema(yy_sg, np.less)[0], signal.argrelextrema(yy_sg, np.greater)[0]
n = int(len(price) / (len(x_tp_min) + len(x_tp_max))) * 2
# peakutils seems to only find maxima, so minima are located by mirroring the curve
mirrors = (yy_sg * -1) + np.mean(price) * 2
# pass 2: search with peakutils
x_tp_max = peakutils.indexes(yy_sg, thres=0.01 / max(price), min_dist=n)
x_tp_min = peakutils.indexes(mirrors, thres=0.01 / max(price), min_dist=n)
return x_tp_min + offest, x_tp_max + offest
def find_peak_vextors(price, return_ref=False, offest=0):
"""
Use a Butterworth signal filter to adaptively find the best extreme points and
decide the number of average-period segments; a second analysis pass uses a
scipy Gaussian machine-learning statistical method.
If you meet a Warning message, upgrading to scipy>=1.2 solves it,
but QUANTAXIS is incompatible with scipy>=1.2.
Parameters
----------
price : (N,) array_like
    The price-time series in which to search for extreme points.
return_ref : bool or None, optional
    Also return the smoothed curve used as a reference; smoothing reduces
    saw-tooth jitter and therefore the number of extreme points to evaluate.
offest : int or None, optional
    Leading np.nan elements may have been removed from the array by .dropna()
    or by slicing such as price[t:]; because this function returns array
    indices, the caller can pass a compensating offset that is added to every
    returned min/max index so they line up with the original series.
Returns
-------
x_tp_min, x_tp_max : ndarray
    The min/max peak points' indices in the array.
"""
xn = price
# Create an order 3 lowpass butterworth filter.
b, a = butter(3, 0.05)
# Apply the filter to xn. Use lfilter_zi to choose the initial condition
# of the filter.
zi = lfilter_zi(b, a)
z, _ = lfilter(b, a, xn, zi=zi * xn[0])
# Apply the filter again, to have a result filtered at an order
# the same as filtfilt.
z2, _ = lfilter(b, a, z, zi=zi * z[0])
# Use filtfilt to apply the filter. If you meet a Warning need upgrade to
# scipy=>1.2 but QUANTAXIS incompatible scipy=>1.2
y = filtfilt(b, a, xn)
# pass 1
x_tp_min, x_tp_max = signal.argrelextrema(y, np.less)[0], signal.argrelextrema(y, np.greater)[0]
n = int(len(price) / (len(x_tp_min) + len(x_tp_max))) * 2
# peakutils seems to only find maxima, so minima are located by mirroring the curve
mirrors = (price * -1) + np.mean(price) * 2
# pass 2: search with peakutils
x_tp_max = peakutils.indexes(price, thres=0.01 / max(price), min_dist=n)
x_tp_min = peakutils.indexes(mirrors, thres=0.01 / max(price), min_dist=n)
if (return_ref):
return x_tp_min + offest, x_tp_max + offest, y
else:
return x_tp_min + offest, x_tp_max + offest
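# --- Editor-added usage sketch (not part of the original module) ---------------
# Illustrative only: detect turning points on a synthetic noisy sine wave that
# stands in for a price series; it relies on the module's peakutils dependency.
def _demo_find_peak_vextors():
    import numpy as np
    price = 10.0 + np.sin(np.linspace(0, 8 * np.pi, 400)) + 0.05 * np.random.randn(400)
    x_tp_min, x_tp_max = find_peak_vextors(price)
    # with offest=0 the returned indices map directly onto `price`
    return price[x_tp_min], price[x_tp_max]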
def Timeline_Integral_with_lambda(Tm,):
"""
explanation:
Cumulative running sum of a time-domain golden-cross/dead-cross signal (reset to zero on a dead cross, 1 --> 0).
params:
* Tm ->:
meaning: signal data
type: null
optional: [null]
return:
np.array
demonstrate:
Not described
output:
Not described
"""
T = [Tm[0]]
#Ti = list(map(lambda x: reduce(lambda z,y: y * (z + y), Tm[0:x]), Tm))
#Ti = list(map(lambda x,y: x * (y + x), Ti[1:], Tm))
# print(Ti)
#list(map(lambda x,y: x * (y + x), Tm[1:], T))
return np.array(T)
@nb.jit(nopython=True)
def Timeline_Integral(Tm:np.ndarray,):
"""
explanation:
Cumulative running sum of a time-domain golden-cross/dead-cross signal (reset to zero on a dead cross, 1 --> 0); benchmarks show this for-loop implementation is the fastest, faster than reduce.
params:
* Tm ->:
meaning:
type: null
optional: [null]
return:
np.array
demonstrate:
Not described
output:
Not described
"""
T = np.zeros(len(Tm)).astype(np.int32)
for i, Tmx in enumerate(Tm):
T[i] = Tmx * (T[i - 1] + Tmx)
return T
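# --- Editor-added usage sketch (not part of the original module) ---------------
# Timeline_Integral turns a 0/1 cross signal into "bars the signal has stayed 1",
# resetting on every 0; the tiny input below is an illustrative assumption.
def _demo_timeline_integral():
    import numpy as np
    Tm = np.array([0, 1, 1, 1, 0, 0, 1, 1], dtype=np.int32)
    return Timeline_Integral(Tm)  # -> [0, 1, 2, 3, 0, 0, 1, 2]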
def Timeline_Integral_with_reduce(Tm,):
"""
explanation:
Cumulative running sum of a time-domain golden-cross/dead-cross signal (reset to zero on a dead cross, 1 --> 0); benchmarks show the for-loop implementation is faster than this reduce version.
params:
* Tm ->:
meaning: signal data
type: null
optional: [null]
return:
np.array
demonstrate:
Not described
output:
Not described
"""
T = []
for i in range(1,len(Tm)):
T.append(reduce(lambda x,y: int(y * (y + x)), Tm[0:i]))
return np.array(T)
@nb.jit(nopython=True)
def Timeline_Integral_with_cross_before(Tm:np.ndarray,):
"""
explanation:
Cumulative count of a time-domain cross signal (a dead cross, 1 --> 0, does not reset it; a golden cross, 0 --> 1, resets it to zero).
Benchmarks show the for-loop is fastest, faster than reduce (even without jit; with jit the for-loop is faster still).
params:
* Tm ->:
meaning: signal data
type: null
optional: [null]
return:
np.array
demonstrate:
Not described
output:
Not described
"""
T = np.zeros(len(Tm)).astype(np.int32)
for i, Tmx in enumerate(Tm):
T[i] = (T[i - 1] + 1) if (Tmx != 1) else 0
return T
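# --- Editor-added usage sketch (not part of the original module) ---------------
# This variant counts bars elapsed since the most recent 1 in the signal,
# resetting to 0 on every 1; the input below is an illustrative assumption.
def _demo_timeline_integral_with_cross_before():
    import numpy as np
    Tm = np.array([1, 0, 0, 0, 1, 0, 0], dtype=np.int32)
    return Timeline_Integral_with_cross_before(Tm)  # -> [0, 1, 2, 3, 0, 1, 2]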
@nb.jit(nopython=True)
def LIS(X):
"""
explanation:
Compute the longest increasing subsequence
Longest increasing subsequence
params:
* X ->:
meaning: sequence
type: null
optional: [null]
return:
(subsequence values, positions of the subsequence elements)
demonstrate:
Not described
output:
Not described
"""
N = len(X)
P = [0] * N
M = [0] * (N + 1)
L = 0
for i in range(N):
lo = 1
hi = L
while lo <= hi:
mid = (lo + hi) // 2
if (X[M[mid]] < X[i]):
lo = mid + 1
else:
hi = mid - 1
newL = lo
P[i] = M[newL - 1]
M[newL] = i
if (newL > L):
L = newL
S = []
pos = []
k = M[L]
for i in range(L - 1, -1, -1):
S.append(X[k])
pos.append(k)
k = P[k]
return S[::-1], pos[::-1]
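# --- Editor-added usage sketch (not part of the original module) ---------------
# Longest increasing subsequence of a small array; which length-4 subsequence is
# returned depends on the algorithm's tie-breaking, so only the length is checked.
def _demo_LIS():
    import numpy as np
    subseq, positions = LIS(np.array([3, 1, 4, 1, 5, 9, 2, 6]))
    assert len(subseq) == 4 and len(positions) == 4
    return subseq, positions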
@nb.jit(nopython=True)
def LDS(X):
"""
explanation:
Compute the longest decreasing subsequence
Longest decreasing subsequence
params:
* X ->:
meaning: sequence
type: null
optional: [null]
return:
(subsequence values, positions of the subsequence elements)
demonstrate:
Not described
output:
Not described
"""
N = len(X)
P = [0] * N
M = [0] * (N + 1)
L = 0
for i in range(N):
lo = 1
hi = L
while lo <= hi:
mid = (lo + hi) // 2
if (X[M[mid]] > X[i]):
lo = mid + 1
else:
hi = mid - 1
newL = lo
P[i] = M[newL - 1]
M[newL] = i
if (newL > L):
L = newL
S = []
pos = []
k = M[L]
for i in range(L - 1, -1, -1):
S.append(X[k])
pos.append(k)
k = P[k]
return S[::-1], pos[::-1]
def price_predict_with_macd_trend_func(data):
"""
Price-trend prediction based on a Butterworth band-pass filter and a scipy Gaussian machine-learning statistical method.
It also includes everything macd_cross_func() does (no way around it, even though computing MACD twice feels silly).
"""
MACD = TA_MACD(data.close)
PRICE_PREDICT = pd.DataFrame(columns=['PRICE_PRED_CROSS', 'PRICE_PRED_CROSS_JX', 'PRICE_PRED_CROSS_SX', 'MACD_CROSS', 'MACD_CROSS_JX', 'MACD_CROSS_SX'], index=data.index)
PRICE_PREDICT = PRICE_PREDICT.assign(DIF=MACD[:,0])
PRICE_PREDICT = PRICE_PREDICT.assign(DEA=MACD[:,1])
PRICE_PREDICT = PRICE_PREDICT.assign(MACD=MACD[:,2])
PRICE_PREDICT = PRICE_PREDICT.assign(DELTA=MACD[:,3])
dea_tp_min, dea_tp_max = find_peak_vextors(PRICE_PREDICT['DEA'].values[33:], offest=33)
PRICE_PREDICT.iloc[dea_tp_min, PRICE_PREDICT.columns.get_loc('MACD_CROSS')] = 1
PRICE_PREDICT.iloc[dea_tp_max, PRICE_PREDICT.columns.get_loc('MACD_CROSS')] = -1
MACD_CROSS_JX = CROSS(PRICE_PREDICT['DIF'], PRICE_PREDICT['DEA'])
DEA_CROSS_JX = CROSS(PRICE_PREDICT['DEA'], 0)
MACD_CROSS_SX = CROSS(PRICE_PREDICT['DEA'], PRICE_PREDICT['DIF'])
DEA_CROSS_SX = CROSS(0, PRICE_PREDICT['DEA'])
PRICE_PREDICT.loc[MACD_CROSS_JX == 1, 'MACD_CROSS_JX'] = 1
PRICE_PREDICT.loc[MACD_CROSS_SX == 1, 'MACD_CROSS_SX'] = -1
PRICE_PREDICT.iloc[dea_tp_min, PRICE_PREDICT.columns.get_loc('MACD_CROSS_JX')] = 1
PRICE_PREDICT.iloc[dea_tp_max, PRICE_PREDICT.columns.get_loc('MACD_CROSS_SX')] = 1
PRICE_PREDICT['MACD_CROSS_JX'] = Timeline_Integral_with_cross_before(PRICE_PREDICT['MACD_CROSS_JX'])
PRICE_PREDICT['MACD_CROSS_SX'] = Timeline_Integral_with_cross_before(PRICE_PREDICT['MACD_CROSS_SX'])
# pass 1
x_tp_min, x_tp_max = find_peak_vextors(data.close.values)
PRICE_PREDICT.iloc[x_tp_min, PRICE_PREDICT.columns.get_loc('PRICE_PRED_CROSS')] = x_tp_min
PRICE_PREDICT.iloc[x_tp_max, PRICE_PREDICT.columns.get_loc('PRICE_PRED_CROSS')] = -x_tp_max
PRICE_PREDICT.iloc[x_tp_min, PRICE_PREDICT.columns.get_loc('PRICE_PRED_CROSS_JX')] = 1
PRICE_PREDICT.iloc[x_tp_max, PRICE_PREDICT.columns.get_loc('PRICE_PRED_CROSS_SX')] = 1
# pass 2: while MACD is in a golden cross, look for extra extreme points to create more buy conditions
x_tp_min, x_tp_max = find_peak_vextors_eagerly(data.close.values)
macd_up_trend_PEAKPOINT_MIN = (PRICE_PREDICT.iloc[x_tp_min, PRICE_PREDICT.columns.get_loc('MACD_CROSS_JX')] < PRICE_PREDICT.iloc[x_tp_min, PRICE_PREDICT.columns.get_loc('MACD_CROSS_SX')])
macd_up_trend_PEAKPOINT_MAX = (PRICE_PREDICT.iloc[x_tp_max, PRICE_PREDICT.columns.get_loc('MACD_CROSS_JX')] < PRICE_PREDICT.iloc[x_tp_max, PRICE_PREDICT.columns.get_loc('MACD_CROSS_SX')])
macd_up_trend_PEAKPOINT_MIN = macd_up_trend_PEAKPOINT_MIN[macd_up_trend_PEAKPOINT_MIN.apply(lambda x: x == True)] # eqv. Trim(x == False)
macd_up_trend_PEAKPOINT_MAX = macd_up_trend_PEAKPOINT_MAX[macd_up_trend_PEAKPOINT_MAX.apply(lambda x: x == True)] # eqv. Trim(x == False)
PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MIN.index, 'PRICE_PRED_CROSS_JX'] = 1
PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MAX.index, 'PRICE_PRED_CROSS_SX'] = 1
PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MIN.index, 'PRICE_PRED_CROSS'] = PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MIN.index].apply(lambda x: PRICE_PREDICT.index.get_level_values(level=0).get_loc(x.name[0]), axis=1)
PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MAX.index, 'PRICE_PRED_CROSS'] = PRICE_PREDICT.loc[macd_up_trend_PEAKPOINT_MAX.index].apply(lambda x: PRICE_PREDICT.index.get_level_values(level=0).get_loc(x.name[0]) * -1, axis=1)
PRICE_PREDICT['PRICE_PRED_CROSS_JX'] = Timeline_Integral_with_cross_before(PRICE_PREDICT['PRICE_PRED_CROSS_JX'])
PRICE_PREDICT['PRICE_PRED_CROSS_SX'] = Timeline_Integral_with_cross_before(PRICE_PREDICT['PRICE_PRED_CROSS_SX'])
if (len(PRICE_PREDICT.index.names) > 2):
return PRICE_PREDICT.reset_index([1,2])
elif (len(PRICE_PREDICT.index.names) > 1):
return PRICE_PREDICT.reset_index([1])
else:
return PRICE_PREDICT
def macd_cross_func(data):
"""
The god-tier indicator: MACD
"""
MACD = TA_MACD(data.close)
MACD_CROSS = pd.DataFrame(columns=['MACD_CROSS', 'MACD_CROSS_JX', 'MACD_CROSS_SX'], index=data.index)
MACD_CROSS = MACD_CROSS.assign(DIF=MACD[:,0])
MACD_CROSS = MACD_CROSS.assign(DEA=MACD[:,1])
MACD_CROSS = MACD_CROSS.assign(MACD=MACD[:,2])
MACD_CROSS = MACD_CROSS.assign(DELTA=MACD[:,3])
dea_tp_min, dea_tp_max = find_peak_vextors(MACD_CROSS['DEA'].values[33:], offest=33)
MACD_CROSS.iloc[dea_tp_min, MACD_CROSS.columns.get_loc('MACD_CROSS')] = 1
MACD_CROSS.iloc[dea_tp_max, MACD_CROSS.columns.get_loc('MACD_CROSS')] = -1
MACD_CROSS_JX = CROSS(MACD_CROSS['DIF'], MACD_CROSS['DEA'])
MACD_CROSS_SX = CROSS(MACD_CROSS['DEA'], MACD_CROSS['DIF'])
MACD_CROSS.loc[MACD_CROSS_JX == 1, 'MACD_CROSS_JX'] = 1
MACD_CROSS.loc[MACD_CROSS_SX == 1, 'MACD_CROSS_SX'] = -1
MACD_CROSS.iloc[dea_tp_min, MACD_CROSS.columns.get_loc('MACD_CROSS_JX')] = 1
MACD_CROSS.iloc[dea_tp_max, MACD_CROSS.columns.get_loc('MACD_CROSS_SX')] = 1
MACD_CROSS['MACD_CROSS_JX'] = Timeline_Integral_with_cross_before(MACD_CROSS['MACD_CROSS_JX'])
MACD_CROSS['MACD_CROSS_SX'] = Timeline_Integral_with_cross_before(MACD_CROSS['MACD_CROSS_SX'])
return MACD_CROSS
def maxfactor_cross_func(data):
"""
Home-grown indicator: MAXFACTOR
"""
RSI = QA.TA_RSI(data.close, timeperiod=12)
CCI = QA.TA_CCI(data.high, data.low, data.close)
KDJ = QA.TA_KDJ(data.high, data.low, data.close)
MAX_FACTOR = CCI[:,0] + (RSI[:,0] - 50) * 4 + (KDJ[:,2] - 50) * 4
MAX_FACTOR_delta = np.r_[np.nan, np.diff(MAX_FACTOR)]
REGRESSION_BASELINE = pd.Series((RSI[:,0] - 50) * 4, index=data.index)
MAXFACTOR_CROSS = pd.DataFrame(columns=['MAXFACTOR_CROSS', 'MAXFACTOR_CROSS_JX', 'MAXFACTOR_CROSS_SX'], index=data.index)
MAXFACTOR_CROSS = MAXFACTOR_CROSS.assign(MAXFACTOR=MAX_FACTOR)
MAXFACTOR_CROSS = MAXFACTOR_CROSS.assign(MAXFACTOR_DELTA=MAX_FACTOR_delta)
MAXFACTOR_CROSS = MAXFACTOR_CROSS.assign(REGRESSION_BASELINE=REGRESSION_BASELINE)
MAXFACTOR_CROSS_JX1 = CROSS(MAX_FACTOR + MAX_FACTOR_delta, REGRESSION_BASELINE - 133)
MAXFACTOR_CROSS_JX2 = CROSS(MAX_FACTOR + MAX_FACTOR_delta, REGRESSION_BASELINE)
MAXFACTOR_CROSS_JX3 = CROSS(MAX_FACTOR + MAX_FACTOR_delta, REGRESSION_BASELINE + 133)
MAXFACTOR_CROSS_JX_JUNCTION = (MAXFACTOR_CROSS_JX1 | MAXFACTOR_CROSS_JX2 | MAXFACTOR_CROSS_JX3)
MAXFACTOR_CROSS_SX1 = CROSS(REGRESSION_BASELINE + 133, MAX_FACTOR + MAX_FACTOR_delta)
MAXFACTOR_CROSS_SX2 = CROSS(REGRESSION_BASELINE, MAX_FACTOR + MAX_FACTOR_delta)
MAXFACTOR_CROSS_SX3 = CROSS(REGRESSION_BASELINE - 133, MAX_FACTOR + MAX_FACTOR_delta)
MAXFACTOR_CROSS_SX_JUNCTION = (MAXFACTOR_CROSS_SX1 | MAXFACTOR_CROSS_SX2 | MAXFACTOR_CROSS_SX3)
MAXFACTOR_CROSS.loc[(MAXFACTOR_CROSS_JX1 | MAXFACTOR_CROSS_JX2 | MAXFACTOR_CROSS_JX3) == 1, 'MAXFACTOR_CROSS'] = 1
MAXFACTOR_CROSS.loc[(MAXFACTOR_CROSS_SX1 | MAXFACTOR_CROSS_SX2 | MAXFACTOR_CROSS_SX3) == 1, 'MAXFACTOR_CROSS'] = -1
MAXFACTOR_CROSS['MAXFACTOR_CROSS_JX'] = Timeline_Integral_with_cross_before(MAXFACTOR_CROSS_JX_JUNCTION)
MAXFACTOR_CROSS['MAXFACTOR_CROSS_SX'] = Timeline_Integral_with_cross_before(MAXFACTOR_CROSS_SX_JUNCTION)
return MAXFACTOR_CROSS
def dual_cross_func(data):
"""
Home-grown indicator: double golden cross of CCI/KDJ against a shifted RSI.
To avoid a Warning, the first 13 NaN values are skipped in the computation and padded back when the result is added to the DataFrame.
"""
RSI = TA_RSI(data.close, timeperiod=12)
CCI = TA_CCI(data.high, data.low, data.close)
KDJ = TA_KDJ(data.high, data.low, data.close)
CCI_CROSS_JX = CROSS_STATUS(CCI[13:,0], (RSI[13:,0] - 50) * 4)
KDJ_J_CROSS_JX = CROSS_STATUS(KDJ[13:,2], RSI[13:,0])
KDJ_J_CROSS_JX_PLUS = CROSS_STATUS(KDJ[13:,2] + KDJ[13:,3], RSI[13:,0])
DUAL_CROSS_JX = np.r_[np.zeros(13), CROSS_STATUS(CCI_CROSS_JX * (CCI_CROSS_JX + KDJ_J_CROSS_JX + KDJ_J_CROSS_JX_PLUS), 1)]
CCI_CROSS_SX = CROSS_STATUS((RSI[13:,0] - 50) * 4, CCI[13:,0])
KDJ_J_CROSS_SX = CROSS_STATUS(RSI[13:,0], KDJ[13:,2])
KDJ_J_CROSS_SX_PLUS = CROSS_STATUS(RSI[13:,0], KDJ[13:,2] + KDJ[13:,3])
DUAL_CROSS_SX = np.r_[np.zeros(13), CROSS_STATUS(CCI_CROSS_SX * (CCI_CROSS_SX + KDJ_J_CROSS_SX + KDJ_J_CROSS_SX_PLUS), 1)]
DUAL_CROSS = pd.DataFrame(columns=['DUAL_CROSS', 'DUAL_CROSS_JX', 'DUAL_CROSS_SX'], index=data.index)
DUAL_CROSS.loc[DUAL_CROSS_JX == 1, 'DUAL_CROSS'] = 1
DUAL_CROSS.loc[DUAL_CROSS_SX == 1, 'DUAL_CROSS'] = -1
DUAL_CROSS['DUAL_CROSS_JX'] = Timeline_Integral(DUAL_CROSS_JX)
DUAL_CROSS['DUAL_CROSS_SX'] = Timeline_Integral(DUAL_CROSS_SX)
return DUAL_CROSS
def ma30_cross_func(data):
"""
Moving-average (MA) golden-cross indicator
"""
MA5 = talib.MA(data.close, 5)
MA30 = talib.MA(data.close, 30)
MA30_CROSS_JX = CROSS(MA5, MA30)
MA30_CROSS_JX_Integral = Timeline_Integral_with_cross_before(MA30_CROSS_JX)
MA30_CROSS_SX = CROSS(MA30, MA5)
MA30_CROSS_SX_Integral = Timeline_Integral_with_cross_before(MA30_CROSS_SX)
MA30_CROSS = pd.DataFrame(columns=['MA30_CROSS', 'MA30_CROSS_JX', 'MA30_CROSS_SX', 'MA30_TP_CROSS_JX', 'MA30_TP_CROSS_SX'], index=data.index)
MA30_CROSS.loc[MA30_CROSS_JX == 1, 'MA30_CROSS'] = 1
MA30_CROSS.loc[MA30_CROSS_SX == 1, 'MA30_CROSS'] = -1
MA30_CROSS['MA30_CROSS_JX'] = Timeline_Integral_with_cross_before(MA30_CROSS_JX)
MA30_CROSS['MA30_CROSS_SX'] = Timeline_Integral_with_cross_before(MA30_CROSS_SX)
# The first 29 values of MA30 are NaN and would raise a Warning; using [29:] skips the NaNs, and the returned indices are compensated with +29
MA30_tp_min, MA30_tp_max = find_peak_vextors(MA30.values[29:], offest=29)
MA30_TP_CROSS = pd.DataFrame(columns=['MA30_TP_CROSS_JX', 'MA30_TP_CROSS_SX'], index=data.index)
MA30_TP_CROSS['MA30_TP_CROSS_SX'] = MA30_TP_CROSS['MA30_TP_CROSS_JX'] = 0
MA30_TP_CROSS.iloc[MA30_tp_min, MA30_TP_CROSS.columns.get_loc('MA30_TP_CROSS_JX')] = 1
MA30_TP_CROSS.iloc[MA30_tp_max, MA30_TP_CROSS.columns.get_loc('MA30_TP_CROSS_SX')] = 1
MA30_CROSS['MA30_TP_CROSS_JX'] = Timeline_Integral_with_cross_before(MA30_TP_CROSS['MA30_TP_CROSS_JX'])
MA30_CROSS['MA30_TP_CROSS_SX'] = Timeline_Integral_with_cross_before(MA30_TP_CROSS['MA30_TP_CROSS_SX'])
return MA30_CROSS
def boll_cross_func(data):
"""
State analysis of golden/dead crosses between the Bollinger bands and the candlesticks
"""
BBANDS = TA_BBANDS(data.close, timeperiod=20, nbdevup=2)
BOLL_CROSS = pd.DataFrame(columns=['min_peak', 'max_peak', 'BOLL_CROSS', 'BOLL_CROSS_JX', 'BOLL_CROSS_SX'], index=data.index)
data = data.assign(BOLL_MA=BBANDS[:,1])
# smooth the highs/lows so a sudden long-wick spike does not distort the signal
data['smooth_low'] = talib.MA(data.low, 2)
data['smooth_high'] = talib.MA(data.high, 2)
BOLL_CROSS['min_peak'] = data.apply(lambda x: min(x['open'], x['close'], x['low'] if x['open'] < x['BOLL_MA'] else x['smooth_low']), axis=1)
BOLL_CROSS['max_peak'] = data.apply(lambda x: max(x['open'], x['close'], x['high'] if x['open'] > x['BOLL_MA'] else x['smooth_high']), axis=1)
BOLL_CROSS_JX = CROSS(BOLL_CROSS['min_peak'], BBANDS[:,2])
BOLL_CROSS_SX = CROSS(BBANDS[:,0], BOLL_CROSS['max_peak'])
BOLL_CROSS.loc[BOLL_CROSS_JX == 1, 'BOLL_CROSS'] = 1
BOLL_CROSS.loc[BOLL_CROSS_SX == 1, 'BOLL_CROSS'] = -1
BOLL_TP_CROSS = pd.DataFrame(columns=['BOLL_TP_CROSS_JX', 'BOLL_TP_CROSS_SX'], index=data.index)
BOLL_TP_CROSS['BOLL_TP_CROSS_SX'] = BOLL_TP_CROSS['BOLL_TP_CROSS_JX'] = 0
BOLL_TP_CROSS.loc[BOLL_CROSS_JX == 1, 'BOLL_TP_CROSS_JX'] = 1
BOLL_TP_CROSS.loc[BOLL_CROSS_SX == 1, 'BOLL_TP_CROSS_SX'] = 1
BOLL_CROSS = BOLL_CROSS.assign(BOLL_UB=BBANDS[:,0])
BOLL_CROSS = BOLL_CROSS.assign(BOLL_MA=BBANDS[:,1])
BOLL_CROSS = BOLL_CROSS.assign(BOLL_LB=BBANDS[:,2])
BOLL_CROSS = BOLL_CROSS.assign(BOLL_WIDTH=BBANDS[:,3])
BOLL_CROSS = BOLL_CROSS.assign(BOLL_DELTA=BBANDS[:,4])
BOLL_CROSS = BOLL_CROSS.assign(BBW_MA20=talib.MA(BBANDS[:,3], 20))
BOLL_CROSS['BOLL_CROSS_JX'] = Timeline_Integral_with_cross_before(BOLL_TP_CROSS['BOLL_TP_CROSS_JX'])
BOLL_CROSS['BOLL_CROSS_SX'] = Timeline_Integral_with_cross_before(BOLL_TP_CROSS['BOLL_TP_CROSS_SX'])
return BOLL_CROSS
| mit |
ericpre/hyperspy | hyperspy/drawing/_markers/vertical_line_segment.py | 2 | 3486 | # -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class VerticalLineSegment(MarkerBase):
"""Vertical line segment marker that can be added to the signal figure
Parameters
----------
x : array or float
The position of line segment in x.
If float, the marker is fixed.
If array, the marker will be updated when navigating. The array should
have the same dimensions in the navigation axes.
y1 : array or float
        The position of the start of the line segment in y.
        See the x argument.
    y2 : array or float
        The position of the end of the line segment in y.
        See the x argument.
kwargs :
        Keyword arguments of valid axvline properties (i.e. recognized by
        mpl.plot).
Example
-------
>>> im = hs.signals.Signal2D(np.zeros((100, 100)))
>>> m = hs.plot.markers.vertical_line_segment(
>>> x=20, y1=30, y2=70, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m)
    Add a marker permanently to a signal
>>> im = hs.signals.Signal2D(np.zeros((60, 60)))
>>> m = hs.plot.markers.vertical_line_segment(x=10, y1=20, y2=50)
>>> im.add_marker(m, permanent=True)
"""
def __init__(self, x, y1, y2, **kwargs):
MarkerBase.__init__(self)
lp = {'color': 'black', 'linewidth': 1}
self.marker_properties = lp
self.set_data(x1=x, y1=y1, y2=y2)
self.set_marker_properties(**kwargs)
self.name = 'vertical_line_segment'
def __repr__(self):
string = "<marker.{}, {} (x={},y1={},y2={},color={})>".format(
self.__class__.__name__,
self.name,
self.get_data_position('x1'),
self.get_data_position('y1'),
self.get_data_position('y2'),
self.marker_properties['color'],
)
return(string)
def update(self):
if self.auto_update is False:
return
self._update_segment()
def _plot_marker(self):
self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
self._update_segment()
def _update_segment(self):
segments = self.marker.get_segments()
segments[0][0, 0] = self.get_data_position('x1')
segments[0][1, 0] = segments[0][0, 0]
if self.get_data_position('y1') is None:
segments[0][0, 1] = plt.getp(self.marker.axes, 'ylim')[0]
else:
segments[0][0, 1] = self.get_data_position('y1')
if self.get_data_position('y2') is None:
segments[0][1, 1] = plt.getp(self.marker.axes, 'ylim')[1]
else:
segments[0][1, 1] = self.get_data_position('y2')
self.marker.set_segments(segments)
| gpl-3.0 |
DamCB/tyssue | tyssue/draw/plt_draw.py | 2 | 17946 | """
Matplotlib based plotting
"""
import shutil
import glob
import tempfile
import subprocess
import warnings
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.path import Path
from matplotlib.patches import FancyArrow, Arc, PathPatch
from matplotlib.collections import PatchCollection, PolyCollection, LineCollection
from ..config.draw import sheet_spec
from ..utils.utils import spec_updater, get_sub_eptm
COORDS = ["x", "y"]
def create_gif(
history,
output,
num_frames=None,
interval=None,
draw_func=None,
margin=5,
**draw_kwds,
):
"""Creates an animated gif of the recorded history.
You need imagemagick on your system for this function to work.
Parameters
----------
history : a :class:`tyssue.History` object
output : path to the output gif file
num_frames : int, the number of frames in the gif
interval : tuples, define begin and end frame of the gif
draw_func : a drawing function
this function must take a `sheet` object as first argument
and return a `fig, ax` pair. Defaults to quick_edge_draw
(aka sheet_view with quick mode)
margin : int, the graph margins in percents, default 5
if margin is -1, let the draw function decide
**draw_kwds are passed to the drawing function
"""
if draw_func is None:
draw_func = sheet_view
draw_kwds.update({"mode": "quick"})
time_stamps = history.time_stamps
if num_frames is not None:
times = np.linspace(time_stamps[0], time_stamps[-1], num_frames)
elif interval is not None:
times = time_stamps[interval[0] : interval[1] + 1]
num_frames = len(times)
else:
raise ValueError("Need to define `num_frames` or `interval` parameters.")
graph_dir = pathlib.Path(tempfile.mkdtemp())
x, y = coords = draw_kwds.get("coords", history.sheet.coords[:2])
sheet0 = history.retrieve(0)
bounds = sheet0.vert_df[coords].describe().loc[["min", "max"]]
delta = (bounds.loc["max"] - bounds.loc["min"]).max()
margin = delta * margin / 100
xlim = bounds.loc["min", x] - margin, bounds.loc["max", x] + margin
ylim = bounds.loc["min", y] - margin, bounds.loc["max", y] + margin
if len(history) < num_frames:
for i, (t_, sheet) in enumerate(history):
fig, ax = draw_func(sheet, **draw_kwds)
if isinstance(ax, plt.Axes) and margin >= 0:
ax.set(xlim=xlim, ylim=ylim)
fig.savefig(graph_dir / f"sheet_{i:03d}")
plt.close(fig)
figs = glob.glob((graph_dir / "sheet_*.png").as_posix())
figs.sort()
for i, t in enumerate(times):
index = np.where(time_stamps >= t)[0][0]
fig = figs[index]
shutil.copy(fig, graph_dir / f"movie_{i:04d}.png")
else:
for i, t in enumerate(times):
sheet = history.retrieve(t)
try:
fig, ax = draw_func(sheet, **draw_kwds)
except Exception as e:
print("Droped frame {i}")
if isinstance(ax, plt.Axes) and margin >= 0:
ax.set(xlim=xlim, ylim=ylim)
fig.savefig(graph_dir / f"movie_{i:04d}.png")
plt.close(fig)
try:
proc = subprocess.run(
["convert", (graph_dir / "movie_*.png").as_posix(), output]
)
except Exception as e:
print(
"Converting didn't work, make sure imagemagick is available on your system"
)
raise e
finally:
shutil.rmtree(graph_dir)
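# --- Editor-added usage sketch (not part of the tyssue API) --------------------
# A minimal, hedged example of create_gif; it assumes `history` is a recorded
# tyssue History object and that imagemagick's `convert` is on the PATH.
def _create_gif_usage_sketch(history, output="sheet.gif"):
    # the default quick mode draws edges only, keeping per-frame rendering cheap
    create_gif(history, output, num_frames=30, margin=5)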
def sheet_view(sheet, coords=COORDS, ax=None, **draw_specs_kw):
"""Base view function, parametrizable
through draw_secs
The default sheet_spec specification is:
{'edge': {
'visible': True,
'width': 0.5,
'head_width': 0.2, # arrow head width for the edges
'length_includes_head': True, # see matplotlib Arrow artist doc
'shape': 'right',
'color': '#2b5d0a', # can be an array
'alpha': 0.8,
'zorder': 1,
'colormap': 'viridis'},
'vert': {
'visible': True,
's': 100,
'color': '#000a4b',
'alpha': 0.3,
'zorder': 2},
'face': {
'visible': False,
'color': '#8aa678',
'alpha': 1.0,
'zorder': -1}
}
"""
draw_specs = sheet_spec()
spec_updater(draw_specs, draw_specs_kw)
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
vert_spec = draw_specs["vert"]
if vert_spec["visible"]:
ax = draw_vert(sheet, coords, ax, **vert_spec)
edge_spec = draw_specs["edge"]
if edge_spec["visible"]:
ax = draw_edge(sheet, coords, ax, **edge_spec)
face_spec = draw_specs["face"]
if face_spec["visible"]:
ax = draw_face(sheet, coords, ax, **face_spec)
ax.autoscale()
ax.set_aspect("equal")
return fig, ax
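# --- Editor-added usage sketch (not part of the tyssue API) --------------------
# Override a few entries of the default draw specs shown in the docstring above;
# any key left out falls back to the sheet_spec() defaults. `sheet` is assumed
# to be a tyssue Sheet carrying x/y coordinates.
def _sheet_view_usage_sketch(sheet):
    fig, ax = sheet_view(
        sheet,
        coords=["x", "y"],
        edge={"color": "#2b5d0a", "width": 1.0},
        vert={"visible": False},
        face={"visible": True, "alpha": 0.5},
    )
    return fig, ax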
def draw_face(sheet, coords, ax, **draw_spec_kw):
"""Draws epithelial sheet polygonal faces in matplotlib
Keyword values can be specified at the element
level as columns of the sheet.face_df
"""
draw_spec = sheet_spec()["face"]
draw_spec.update(**draw_spec_kw)
collection_specs = parse_face_specs(draw_spec, sheet)
if "visible" in sheet.face_df.columns:
edges = sheet.edge_df[sheet.upcast_face(sheet.face_df["visible"])].index
if edges.shape[0]:
_sheet = get_sub_eptm(sheet, edges)
sheet = _sheet
color = collection_specs["facecolors"]
if isinstance(color, np.ndarray):
faces = sheet.face_df["face_o"].values.astype(np.uint32)
collection_specs["facecolors"] = color.take(faces, axis=0)
else:
warnings.warn("No face is visible")
if not sheet.is_ordered:
sheet_ = sheet.copy()
sheet_.reset_index(order=True)
polys = sheet_.face_polygons(coords)
else:
polys = sheet.face_polygons(coords)
p = PolyCollection(polys, closed=True, **collection_specs)
ax.add_collection(p)
return ax
def parse_face_specs(face_draw_specs, sheet):
collection_specs = {}
color = face_draw_specs.get("color")
if callable(color):
color = color(sheet)
face_draw_specs["color"] = color
if color is None:
return {}
elif isinstance(color, str):
collection_specs["facecolors"] = color
elif hasattr(color, "__len__"):
collection_specs["facecolors"] = _face_color_from_sequence(
face_draw_specs, sheet
)
if "alpha" in face_draw_specs:
collection_specs["alpha"] = face_draw_specs["alpha"]
return collection_specs
def _face_color_from_sequence(face_spec, sheet):
color_ = face_spec["color"]
cmap = cm.get_cmap(face_spec.get("colormap", "viridis"))
color_min, color_max = face_spec.get("color_range", (color_.min(), color_.max()))
if color_.shape in [(sheet.Nf, 3), (sheet.Nf, 4)]:
return color_
elif color_.shape == (sheet.Nf,):
if np.ptp(color_) < 1e-10:
warnings.warn("Attempting to draw a colormap " "with a uniform value")
return np.ones((sheet.Nf, 3)) * 0.5
normed = (color_ - color_min) / (color_max - color_min)
return cmap(normed)
else:
raise ValueError(
"shape of `face_spec['color']` must be either (Nf, 3), (Nf, 4) or (Nf,)"
)
def draw_vert(sheet, coords, ax, **draw_spec_kw):
"""Draw junction vertices in matplotlib."""
draw_spec = sheet_spec()["vert"]
draw_spec.update(**draw_spec_kw)
x, y = coords
if "z_coord" in sheet.vert_df.columns:
pos = sheet.vert_df.sort_values("z_coord")[coords]
else:
pos = sheet.vert_df[coords]
ax.scatter(pos[x], pos[y], **draw_spec_kw)
return ax
def draw_edge(sheet, coords, ax, **draw_spec_kw):
""""""
draw_spec = sheet_spec()["edge"]
draw_spec.update(**draw_spec_kw)
arrow_specs, collections_specs = _parse_edge_specs(draw_spec, sheet)
dx, dy = ("d" + c for c in coords)
sx, sy = ("s" + c for c in coords)
tx, ty = ("t" + c for c in coords)
if draw_spec.get("head_width"):
app_length = (
np.hypot(sheet.edge_df[dx], sheet.edge_df[dy]) * sheet.edge_df.length.mean()
)
patches = [
FancyArrow(*edge[[sx, sy, dx, dy]], **arrow_specs)
for idx, edge in sheet.edge_df[app_length > 1e-6].iterrows()
]
ax.add_collection(PatchCollection(patches, False, **collections_specs))
else:
segments = sheet.edge_df[[sx, sy, tx, ty]].to_numpy().reshape((-1, 2, 2))
ax.add_collection(LineCollection(segments, **collections_specs))
return ax
def _parse_edge_specs(edge_draw_specs, sheet):
arrow_keys = ["head_width", "length_includes_head", "shape"]
arrow_specs = {
key: val for key, val in edge_draw_specs.items() if key in arrow_keys
}
collection_specs = {}
if arrow_specs.get("head_width"): # draw arrows
color_key = "edgecolors"
else:
color_key = "colors"
if "color" in edge_draw_specs:
if callable(edge_draw_specs["color"]):
edge_draw_specs["color"] = edge_draw_specs["color"](sheet)
if isinstance(edge_draw_specs["color"], str):
collection_specs[color_key] = edge_draw_specs["color"]
elif hasattr(edge_draw_specs["color"], "__len__"):
collection_specs[color_key] = _wire_color_from_sequence(
edge_draw_specs, sheet
)
if "width" in edge_draw_specs:
collection_specs["linewidths"] = edge_draw_specs["width"]
if "alpha" in edge_draw_specs:
collection_specs["alpha"] = edge_draw_specs["alpha"]
return arrow_specs, collection_specs
def _wire_color_from_sequence(edge_spec, sheet):
""""""
color_ = edge_spec["color"]
color_min, color_max = edge_spec.get("color_range", (color_.min(), color_.max()))
cmap = cm.get_cmap(edge_spec.get("colormap", "viridis"))
if color_.shape in [(sheet.Nv, 3), (sheet.Nv, 4)]:
return (sheet.upcast_srce(color_) + sheet.upcast_trgt(color_)) / 2
elif color_.shape == (sheet.Nv,):
if np.ptp(color_) < 1e-10:
warnings.warn("Attempting to draw a colormap " "with a uniform value")
return np.ones((sheet.Ne, 3)) * 0.7
if not hasattr(color_, "index"):
color_ = pd.Series(color_, index=sheet.vert_df.index)
color_ = (sheet.upcast_srce(color_) + sheet.upcast_trgt(color_)) / 2
return cmap((color_ - color_min) / (color_max - color_min))
elif color_.shape in [(sheet.Ne, 3), (sheet.Ne, 4)]:
return color_
elif color_.shape == (sheet.Ne,):
if np.ptp(color_) < 1e-10:
warnings.warn("Attempting to draw a colormap " "with a uniform value")
return np.ones((sheet.Nv, 3)) * 0.7
return cmap((color_ - color_min) / (color_max - color_min))
def quick_edge_draw(sheet, coords=["x", "y"], ax=None, **draw_spec_kw):
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
lines_x, lines_y = _get_lines(sheet, coords)
ax.plot(lines_x, lines_y, **draw_spec_kw)
ax.set_aspect("equal")
return fig, ax
def _get_lines(sheet, coords):
lines_x, lines_y = np.zeros(2 * sheet.Ne), np.zeros(2 * sheet.Ne)
scoords = ["s" + c for c in coords]
tcoords = ["t" + c for c in coords]
if set(scoords + tcoords).issubset(sheet.edge_df.columns):
srce_x, srce_y = sheet.edge_df[scoords].values.T
trgt_x, trgt_y = sheet.edge_df[tcoords].values.T
else:
srce_x, srce_y = sheet.upcast_srce(sheet.vert_df[coords]).values.T
trgt_x, trgt_y = sheet.upcast_trgt(sheet.vert_df[coords]).values.T
lines_x[::2] = srce_x
lines_x[1::2] = trgt_x
lines_y[::2] = srce_y
lines_y[1::2] = trgt_y
# Trick from https://github.com/matplotlib/
# matplotlib/blob/master/lib/matplotlib/tri/triplot.py#L65
lines_x = np.insert(lines_x, slice(None, None, 2), np.nan)
lines_y = np.insert(lines_y, slice(None, None, 2), np.nan)
return lines_x, lines_y
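# --- Editor-added illustration (not part of the tyssue API) --------------------
# The np.nan insertion above lets a single ax.plot call draw many disjoint
# segments, because matplotlib breaks the line wherever it meets a NaN. The tiny
# standalone example below (two segments) is illustrative only.
def _demo_nan_separated_segments():
    import numpy as np
    import matplotlib.pyplot as plt
    xs = np.array([0.0, 1.0, 2.0, 3.0])  # segment 1: (0, 1), segment 2: (2, 3)
    ys = np.array([0.0, 1.0, 0.0, 1.0])
    xs = np.insert(xs, slice(None, None, 2), np.nan)
    ys = np.insert(ys, slice(None, None, 2), np.nan)
    fig, ax = plt.subplots()
    ax.plot(xs, ys, "k-")  # one call, two visually separate segments
    return fig, ax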
def plot_forces(
sheet, geom, model, coords, scaling, ax=None, approx_grad=None, **draw_specs_kw
):
"""Plot the net forces at each vertex, with their amplitudes multiplied
    by `scaling`. To be clear, this is the opposite of the gradient, i.e. -grad E.
"""
draw_specs = sheet_spec()
spec_updater(draw_specs, draw_specs_kw)
gcoords = ["g" + c for c in coords]
if approx_grad is not None:
app_grad = approx_grad(sheet, geom, model)
grad_i = (
pd.DataFrame(
index=sheet.vert_df[sheet.vert_df.is_active.astype(bool)].index,
data=app_grad.reshape((-1, len(sheet.coords))),
columns=["g" + c for c in sheet.coords],
)
* scaling
)
else:
grad_i = model.compute_gradient(sheet, components=False) * scaling
grad_i = grad_i.loc[sheet.vert_df["is_active"].astype(bool)]
    sheet.vert_df[gcoords] = -grad_i[gcoords]  # F = -grad E
if 'extract' in draw_specs:
sheet = sheet.extract_bounding_box(**draw_specs['extract'])
if ax is None:
fig, ax = quick_edge_draw(sheet, coords)
else:
fig = ax.get_figure()
arrows = sheet.vert_df[coords+gcoords]
for _, arrow in arrows.iterrows():
ax.arrow(*arrow, **draw_specs["grad"])
return fig, ax
def plot_scaled_energies(sheet, geom, model, scales, ax=None):
"""Plot scaled energies
Parameters
----------
    sheet : a :class:`Sheet` object
    geom : a :class:`Geometry` class
    model : a :class:`Model`
    scales : np.linspace of float
Returns
-------
    fig : a :class:`matplotlib.figure.Figure` instance
    ax : a :class:`matplotlib.Axes` instance, default None
"""
from ..utils import scaled_unscaled
def get_energies():
energies = np.array([e.mean() for e in model.compute_energy(sheet, True)])
return energies
energies = np.array(
[scaled_unscaled(get_energies, scale, sheet, geom) for scale in scales]
)
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
ax.plot(scales, energies.sum(axis=1), "k-", lw=4, alpha=0.3, label="total")
for e, label in zip(energies.T, model.labels):
ax.plot(scales, e, label=label)
ax.legend()
return fig, ax
def get_arc_data(sheet):
srce_pos = sheet.upcast_srce(sheet.vert_df[sheet.coords])
trgt_pos = sheet.upcast_trgt(sheet.vert_df[sheet.coords])
radius = 1 / sheet.edge_df["curvature"]
e_x = sheet.edge_df["dx"] / sheet.edge_df["length"]
e_y = sheet.edge_df["dy"] / sheet.edge_df["length"]
center_x = (srce_pos.x + trgt_pos.x) / 2 - e_y * (radius - sheet.edge_df["sagitta"])
center_y = (srce_pos.y + trgt_pos.y) / 2 - e_x * (radius - sheet.edge_df["sagitta"])
alpha = sheet.edge_df["arc_chord_angle"]
beta = sheet.edge_df["chord_orient"]
# Ok, I admit a fair amount of trial and
# error to get to the stuff below :-p
rot = beta - np.sign(alpha) * np.pi / 2
theta1 = (-alpha + rot) * np.sign(alpha)
theta2 = (alpha + rot) * np.sign(alpha)
center_data = pd.DataFrame.from_dict(
{
"radius": np.abs(radius),
"x": center_x,
"y": center_y,
"theta1": theta1,
"theta2": theta2,
}
)
return center_data
def curved_view(sheet, radius_cutoff=1e3):
center_data = get_arc_data(sheet)
fig, ax = sheet_view(sheet, **{"edge": {"visible": False}})
curves = []
for idx, edge in center_data.iterrows():
if edge["radius"] > radius_cutoff:
st = sheet.edge_df.loc[idx, ["srce", "trgt"]]
xy = sheet.vert_df.loc[st, sheet.coords]
patch = PathPatch(Path(xy))
else:
patch = Arc(
edge[["x", "y"]],
2 * edge["radius"],
2 * edge["radius"],
theta1=edge["theta1"] * 180 / np.pi,
theta2=edge["theta2"] * 180 / np.pi,
)
curves.append(patch)
ax.add_collection(PatchCollection(curves, False, **{"facecolors": "none"}))
ax.autoscale()
return fig, ax
def plot_junction(eptm, edge_index, coords=["x", "y"]):
"""Plots local graph around a junction, for debugging purposes."""
v10, v11 = eptm.edge_df.loc[edge_index, ["srce", "trgt"]]
fig, ax = plt.subplots()
ax.scatter(*eptm.vert_df.loc[[v10, v11], coords].values.T, marker="+", s=300)
v10_out = set(eptm.edge_df[eptm.edge_df["srce"] == v10]["trgt"]) - {v11}
v11_out = set(eptm.edge_df[eptm.edge_df["srce"] == v11]["trgt"]) - {v10}
verts = v10_out.union(v11_out)
ax.scatter(*eptm.vert_df.loc[v10_out, coords].values.T)
ax.scatter(*eptm.vert_df.loc[v11_out, coords].values.T)
for _, edge in eptm.edge_df.query(f"srce == {v10}").iterrows():
ax.plot(
edge[["s" + coords[0], "t" + coords[0]]],
edge[["s" + coords[1], "t" + coords[1]]],
lw=3,
alpha=0.3,
c="r",
)
for _, edge in eptm.edge_df.query(f"srce == {v11}").iterrows():
ax.plot(
edge[["s" + coords[0], "t" + coords[0]]],
edge[["s" + coords[1], "t" + coords[1]]],
"k--",
)
for v in verts:
for _, edge in eptm.edge_df.query(f"srce == {v}").iterrows():
if edge["trgt"] in {v10, v11}:
continue
ax.plot(
edge[["s" + coords[0], "t" + coords[0]]],
edge[["s" + coords[1], "t" + coords[1]]],
"k",
lw=0.4,
)
fig.set_size_inches(12, 12)
return fig, ax
| gpl-3.0 |
daajoe/trellis | trellis/extractor/parambfs.py | 1 | 10608 | # noinspection PyRedundantParentheses
import copy, os
from itertools import permutations
import random
import networkx as nx
# from networkx.drawing.nx_agraph import graphviz_layout, write_dot
# import matplotlib.pyplot as plt
# #import matplotlib
from trellis.extractor.extractor import Extractor
from trellis.td import TreeDecomposition
class ParamExtractor(Extractor):
@staticmethod
def bfs(decomp, max_bag_size=None, budget=50, rand=False, c1=1.0, c2=0.5, beta=5, gamma=10, delta=2):
# get the bags from the tree decomposition
"""
:param budget: the number of vertices in the local decomp
:param max_bag_size: the bagsize from where we want to start bfs
:type decomp: decomposition
"""
rest_decomp = copy.deepcopy(decomp)
bag_lengths = dict(zip(decomp.bags.keys(), map(len, decomp.bags.values())))
bags = decomp.bags
# root of the BFS is the bag with max elements
root_id = decomp.get_first_node(max_bag_size)
root = bag_lengths.keys()[root_id]
bfs_queue = [root]
bfs_depth = dict()
bfs_common_nodes = {}
parent = {}
# initialization for BFS
for i in decomp.tree.nodes():
bfs_depth[i] = -1
parent[i] = -1
bfs_depth[root] = 0
parent[root] = root
internal_nodes = []
bfs_common_nodes[root] = decomp.bags[root]
sub_vertices = set(decomp.bags[root])
# root is the internal node should not be deleted from the local tree
internal_nodes.append(root)
# maybe change this part Not sure how to avoid this.
while bfs_queue:
# show_graph(decomp.tree, 1)
# print "BFS:", bfs_queue
if rand:
random.shuffle(bfs_queue)
v2 = bfs_queue.pop(0)
# print v2,bfs_queue
# print v2,decomp.tree[v2]
# if any of the neighbours have a bag of size > current bag do not continue on this bag
# changing the checking to the intersection of two bags i.e. check how many vertices are common.
for w in decomp.tree[v2]:
flag = 0
if bfs_depth[w] == -1:
parent[w] = v2
bfs_common_nodes[w] = bags[w].intersection(bags[v2])
bfs_depth[w] = bfs_depth[v2] + 1
if c1 * len(bags[w]) - c2 * len(bfs_common_nodes[w]) <= 1:
if w not in bfs_queue and w not in internal_nodes:
bfs_queue.append(w)
if w not in internal_nodes:
internal_nodes.append(w)
sub_vertices |= decomp.bags[w]
continue
if bfs_depth[w] <= beta:
if w not in bfs_queue and w not in internal_nodes:
bfs_queue.append(w)
if w not in internal_nodes:
internal_nodes.append(w)
sub_vertices |= decomp.bags[w]
continue
sub_tree = ParamExtractor.subtree(decomp, w, v2)
if len(sub_tree) <= gamma:
for w1 in sub_tree:
if w1 not in bfs_queue and w1 not in internal_nodes:
bfs_queue.append(w1)
bfs_depth[w1] = bfs_depth[w] + 1
parent[w1] = w
if w1 not in internal_nodes:
internal_nodes.append(w1)
sub_vertices |= decomp.bags[w1]
continue
else:
flag = 1
if flag == 1:
new_node = max(rest_decomp.tree.nodes()) + 1
rest_decomp.tree.add_node(new_node)
rest_decomp.tree.add_edge(new_node, w)
rest_decomp.tree.add_edge(new_node, parent[w])
rest_decomp.tree.remove_edge(w, parent[w])
rest_decomp.bags[new_node] = set(bfs_common_nodes[w])
if w in internal_nodes:
internal_nodes.remove(w)
if new_node not in internal_nodes:
internal_nodes.append(new_node)
if len(sub_vertices) >= budget + delta * max_bag_size:
break
print len(internal_nodes), len(sub_vertices)
# rest_decomp.show(layout=1)
return internal_nodes, sub_vertices, rest_decomp
@staticmethod
def subtree(decomp, w, v):
neigh = decomp.tree.neighbors(w)
neigh.remove(v)
dfs_visited = [w]
while neigh:
try:
n = neigh.pop()
dfs_visited.append(n)
for i in decomp.tree.neighbors(n):
if i in dfs_visited:
continue
neigh.append(i)
except StopIteration:
break
return dfs_visited
@staticmethod
def extract_graph(internal_nodes, decomp, g):
"""
generates graph for the local tree decomposition
ASSUMPTION: vertices have not been relabelled
:return:
:param g: input graph type: Networkx Graph
:param internal_nodes: nodes of tree decomposition which are picked by BFS type: list
:param decomp: Tree decomposition type: Networkx Graph
        :return: sub_graph: graph generated by the local tree decomposition, with a clique added for every leaf node/bag (type: networkx Graph)
        :return: rest_decomp: the sub tree decomposition left after removing the local tree decomposition (type: networkx Graph)
        :return: connecting_nodes: the leaves where the local tree decomposition connects to rest_decomp (type: dict)
        """
y = decomp.tree.subgraph(internal_nodes)
# show_graph(y,layout=1)
sub_nodes = set()
for n in y.nodes():
sub_nodes |= set(decomp.bags[n])
connecting_nodes = {}
sub_graph = g.subgraph(sub_nodes)
for leaf, degree in y.degree().iteritems():
if degree != 1:
continue
if decomp.tree.degree(leaf) > y.degree(leaf):
internal_nodes.remove(leaf)
connecting_nodes[leaf] = decomp.bags[leaf]
for i, j in permutations(decomp.bags[leaf], r=2):
sub_graph.add_edge(i, j)
rest_decomp = TreeDecomposition(tree=decomp.tree.subgraph(set(decomp.tree.nodes()) - set(internal_nodes)))
#TODO:
#,
# temp_path=self.temp_path,
# delete_temp=self.delete_temp, plot_if_td_invalid=self.plot_if_td_invalid
for i in internal_nodes:
del decomp.bags[i]
rest_decomp.bags = decomp.bags
return sub_graph, rest_decomp, connecting_nodes
@staticmethod
def extract_decomposition(decomp, g, max_bag_size=None, budget=50,
extractor_args={'extractor_c1': 1.0, 'extractor_c2': 0.5, 'extractor_beta': 3,
'extractor_gamma': 5, 'extractor_random': False, 'extractor_delta': 2}):
internal_nodes, _, rest_decomp = ParamExtractor.bfs(decomp, max_bag_size=max_bag_size, budget=budget,
c1=extractor_args['extractor_c1'],
c2=extractor_args['extractor_c2'],
beta=extractor_args['extractor_beta'],
gamma=extractor_args['extractor_gamma'],
rand=extractor_args['extractor_random'],
delta=extractor_args['extractor_delta'])
sub_graph, rest_decomp, connecting_leaves = ParamExtractor.extract_graph(internal_nodes,
copy.deepcopy(rest_decomp), g)
# exit(0)
return rest_decomp, sub_graph, connecting_leaves
@staticmethod
def connect_decomp(rest_decomp, sub_decomp, connecting_nodes, graph, td_name, always_validate=True):
if rest_decomp.tree.number_of_nodes() == 0:
return TreeDecomposition(tree=sub_decomp.tree, bags=sub_decomp.bags, graph=graph, td_name=td_name)
new_decomp = nx.union(rest_decomp.tree, sub_decomp.tree)
for node, bag in connecting_nodes.iteritems():
connect = True
for key, value in sub_decomp.bags.iteritems():
rest_decomp.bags[key] = value
if bag.issubset(value) and connect:
new_decomp.add_edge(node, key)
connect = False
td = TreeDecomposition(tree=new_decomp, bags=rest_decomp.bags, graph=graph, td_name=td_name)
if always_validate:
td.validate2()
return td
#
# def show_graph(graph, layout, nolabel=0, write=0, file_name=None, dnd=0, labels=None):
# """ show graph
# layout 1:graphviz,
# 2:circular,
# 3:spring,
# 4:spectral,
# 5: random,
# 6: shell
# """
# if dnd == 0:
# m = graph.copy()
# pos = graphviz_layout(m)
# if layout == 1:
# pos = graphviz_layout(m)
# elif layout == 2:
# pos = nx.circular_layout(m)
# elif layout == 3:
# pos = nx.spring_layout(m)
# elif layout == 4:
# pos = nx.spectral_layout(m)
# elif layout == 5:
# pos = nx.random_layout(m)
# elif layout == 6:
# pos = nx.shell_layout(m)
# if not nolabel:
# nx.draw_networkx_edge_labels(m, pos)
# nx.draw_networkx_nodes(m, pos)
# if labels:
# labels = {k: '%s:%s'%(k,str(sorted(list(v)))) for k,v in labels.iteritems()}
# nx.draw_networkx_labels(m, pos, labels)
# else:
# nx.draw_networkx_labels(m, pos)
# if write != 0:
# write_dot(m, file_name + ".dot")
# os.system("dot -Tps " + file_name + ".dot -o " + file_name + '.ps')
# else:
# # plt.ion()
# # nx.draw(m, pos)
# # plt.plot(m,pos)
# nx.draw(m, pos)
# # plt.show(block=False)
# plt.show()
| gpl-3.0 |
adamgreenhall/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_points, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
ywcui1990/htmresearch | projects/sequence_prediction/mackey_glass/generate_line.py | 13 | 2270 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import math
from matplotlib import pyplot
FILE_PREFIX = "data"
m = 1
def run():
T = []
Y = []
a = 0.05
b = 2.0
def fn(t):
global m
v = ((a * t) % b) - 1
if abs(v) < 1e-5:
m *= -1
return m * v
t = 0.
y = fn(t)
dt = .1
with open(FILE_PREFIX + "_all.csv", 'wb') as allFile:
with open(FILE_PREFIX + "_train.csv", 'wb') as trainFile:
with open(FILE_PREFIX + "_test.csv", 'wb') as testFile:
allWriter = csv.writer(allFile)
trainWriter = csv.writer(trainFile)
testWriter = csv.writer(testFile)
for writer in (allWriter, trainWriter, testWriter):
writer.writerow(["y"])
writer.writerow(["float"])
writer.writerow([])
while True:
T.append(t)
Y.append(y)
if abs(round(t) - t) < 1e-5:
print("y(%2.1f)\t= %4.6f \t" % ( t, y ))
allWriter.writerow([y])
if t >= 200 and t < 3200:
trainWriter.writerow([y])
elif t >= 5000 and t < 5500:
testWriter.writerow([y])
t, y = t + dt, fn(t)
if t > 5500:
break
pyplot.plot(T, Y)
pyplot.xlim(5000, 5250)
pyplot.show()
if __name__ == "__main__":
run()
| agpl-3.0 |
timy/dm_spec | ana/mpi_spec_1d/plot_orien.py | 1 | 1634 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from itertools import product, combinations
n_esmb = 100000
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect("equal")
data = np.loadtxt("res/euler.dat")
#draw cube
# r = [-1, 1]
# for s, e in combinations(np.array(list(product(r, r, r))), 2):
# if np.sum(np.abs(s - e)) == (r[1] - r[0]):
# ax.plot3D(*zip(s, e), color="b")
#draw sphere
u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
x = np.cos(u) * np.sin(v)
y = np.sin(u) * np.sin(v)
z = np.cos(v)
ax.plot_wireframe(x, y, z, color="r")
#draw a point
ax.scatter([0],[0],[0],color="g",s=100)
#draw a vector
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
phi = data[:,0]
theta = data[:,1]
psi = data[:,2]
x = np.sin(theta) * np.sin(psi);
y = np.sin(theta) * np.cos(psi);
z = np.cos(theta);
for i_esmb in range(n_esmb):
a = Arrow3D( [0, x[i_esmb]], [0, y[i_esmb]], [0, z[i_esmb]],
mutation_scale=20, lw=1, arrowstyle="-|>",
color=plt.cm.RdYlBu(i_esmb) )
ax.add_artist(a)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/io/json/test_json_table_schema.py | 9 | 18572 | """Tests for Table Schema integration."""
import json
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.core.dtypes.dtypes import (
PeriodDtype, CategoricalDtype, DatetimeTZDtype)
from pandas.io.json.table_schema import (
as_json_table_type,
build_table_schema,
make_field,
set_default_names)
class TestBuildSchema(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
},
index=pd.Index(range(4), name='idx'))
def test_build_table_schema(self):
result = build_table_schema(self.df, version=False)
expected = {
'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['idx']
}
assert result == expected
result = build_table_schema(self.df)
assert "pandas_version" in result
def test_series(self):
s = pd.Series([1, 2, 3], name='foo')
result = build_table_schema(s, version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'foo', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
result = build_table_schema(s)
assert 'pandas_version' in result
    def test_series_unnamed(self):
result = build_table_schema(pd.Series([1, 2, 3]), version=False)
expected = {'fields': [{'name': 'index', 'type': 'integer'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}
assert result == expected
def test_multiindex(self):
df = self.df.copy()
idx = pd.MultiIndex.from_product([('a', 'b'), (1, 2)])
df.index = idx
result = build_table_schema(df, version=False)
expected = {
'fields': [{'name': 'level_0', 'type': 'string'},
{'name': 'level_1', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
],
'primaryKey': ['level_0', 'level_1']
}
assert result == expected
df.index.names = ['idx0', None]
expected['fields'][0]['name'] = 'idx0'
expected['primaryKey'] = ['idx0', 'level_1']
result = build_table_schema(df, version=False)
assert result == expected
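    def test_build_table_schema_categorical_sketch(self):
        # Editor-added illustrative sketch, not part of the original suite:
        # categorical data is typed as 'any' with an enum constraint listing
        # the categories (the exact field layout is assumed from make_field).
        s = pd.Series(pd.Categorical(['a', 'b', 'a'], ordered=True), name='cat')
        result = build_table_schema(s, version=False)
        cat_field = result['fields'][1]
        assert cat_field['type'] == 'any'
        assert cat_field['constraints'] == {'enum': ['a', 'b']}
        assert cat_field['ordered'] is True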
class TestTableSchemaType(object):
def test_as_json_table_type_int_data(self):
int_data = [1, 2, 3]
int_types = [np.int, np.int16, np.int32, np.int64]
for t in int_types:
assert as_json_table_type(np.array(
int_data, dtype=t)) == 'integer'
def test_as_json_table_type_float_data(self):
float_data = [1., 2., 3.]
float_types = [np.float, np.float16, np.float32, np.float64]
for t in float_types:
assert as_json_table_type(np.array(
float_data, dtype=t)) == 'number'
def test_as_json_table_type_bool_data(self):
bool_data = [True, False]
bool_types = [bool, np.bool]
for t in bool_types:
assert as_json_table_type(np.array(
bool_data, dtype=t)) == 'boolean'
def test_as_json_table_type_date_data(self):
date_data = [pd.to_datetime(['2016']),
pd.to_datetime(['2016'], utc=True),
pd.Series(pd.to_datetime(['2016'])),
pd.Series(pd.to_datetime(['2016'], utc=True)),
pd.period_range('2016', freq='A', periods=3)]
for arr in date_data:
assert as_json_table_type(arr) == 'datetime'
def test_as_json_table_type_string_data(self):
strings = [pd.Series(['a', 'b']), pd.Index(['a', 'b'])]
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_data(self):
assert as_json_table_type(pd.Categorical(['a'])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
assert as_json_table_type(pd.Series(pd.Categorical([1]))) == 'any'
assert as_json_table_type(pd.CategoricalIndex([1])) == 'any'
assert as_json_table_type(pd.Categorical([1])) == 'any'
# ------
# dtypes
# ------
def test_as_json_table_type_int_dtypes(self):
integers = [np.int, np.int16, np.int32, np.int64]
for t in integers:
assert as_json_table_type(t) == 'integer'
def test_as_json_table_type_float_dtypes(self):
floats = [np.float, np.float16, np.float32, np.float64]
for t in floats:
assert as_json_table_type(t) == 'number'
def test_as_json_table_type_bool_dtypes(self):
bools = [bool, np.bool]
for t in bools:
assert as_json_table_type(t) == 'boolean'
def test_as_json_table_type_date_dtypes(self):
# TODO: datedate.date? datetime.time?
dates = [np.datetime64, np.dtype("<M8[ns]"), PeriodDtype(),
DatetimeTZDtype('ns', 'US/Central')]
for t in dates:
assert as_json_table_type(t) == 'datetime'
def test_as_json_table_type_timedelta_dtypes(self):
durations = [np.timedelta64, np.dtype("<m8[ns]")]
for t in durations:
assert as_json_table_type(t) == 'duration'
def test_as_json_table_type_string_dtypes(self):
strings = [object] # TODO
for t in strings:
assert as_json_table_type(t) == 'string'
def test_as_json_table_type_categorical_dtypes(self):
assert as_json_table_type(pd.Categorical) == 'any'
assert as_json_table_type(CategoricalDtype()) == 'any'
class TestTableOrient(object):
def setup_method(self, method):
self.df = DataFrame(
{'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'c'],
'C': pd.date_range('2016-01-01', freq='d', periods=4),
'D': pd.timedelta_range('1H', periods=4, freq='T'),
'E': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'])),
'F': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'],
ordered=True)),
'G': [1., 2., 3, 4.],
'H': pd.date_range('2016-01-01', freq='d', periods=4,
tz='US/Central'),
},
index=pd.Index(range(4), name='idx'))
def test_build_series(self):
s = pd.Series([1, 2], name='a')
s.index.name = 'id'
result = s.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [{'name': 'id', 'type': 'integer'},
{'name': 'a', 'type': 'integer'}]
schema = {
'fields': fields,
'primaryKey': ['id'],
}
expected = OrderedDict([
('schema', schema),
('data', [OrderedDict([('id', 0), ('a', 1)]),
OrderedDict([('id', 1), ('a', 2)])])])
assert result == expected
def test_to_json(self):
df = self.df.copy()
df.index.name = 'idx'
result = df.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
assert "pandas_version" in result['schema']
result['schema'].pop('pandas_version')
fields = [
{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'},
{'name': 'D', 'type': 'duration'},
{'constraints': {'enum': ['a', 'b', 'c']},
'name': 'E',
'ordered': False,
'type': 'any'},
{'constraints': {'enum': ['a', 'b', 'c']},
'name': 'F',
'ordered': True,
'type': 'any'},
{'name': 'G', 'type': 'number'},
{'name': 'H', 'type': 'datetime', 'tz': 'US/Central'}
]
schema = {
'fields': fields,
'primaryKey': ['idx'],
}
data = [
OrderedDict([('idx', 0), ('A', 1), ('B', 'a'),
('C', '2016-01-01T00:00:00.000Z'),
('D', 'P0DT1H0M0S'),
('E', 'a'), ('F', 'a'), ('G', 1.),
('H', '2016-01-01T06:00:00.000Z')
]),
OrderedDict([('idx', 1), ('A', 2), ('B', 'b'),
('C', '2016-01-02T00:00:00.000Z'),
('D', 'P0DT1H1M0S'),
('E', 'b'), ('F', 'b'), ('G', 2.),
('H', '2016-01-02T06:00:00.000Z')
]),
OrderedDict([('idx', 2), ('A', 3), ('B', 'c'),
('C', '2016-01-03T00:00:00.000Z'),
('D', 'P0DT1H2M0S'),
('E', 'c'), ('F', 'c'), ('G', 3.),
('H', '2016-01-03T06:00:00.000Z')
]),
OrderedDict([('idx', 3), ('A', 4), ('B', 'c'),
('C', '2016-01-04T00:00:00.000Z'),
('D', 'P0DT1H3M0S'),
('E', 'c'), ('F', 'c'), ('G', 4.),
('H', '2016-01-04T06:00:00.000Z')
]),
]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
def test_to_json_float_index(self):
data = pd.Series(1, index=[1., 2.])
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
expected = (
OrderedDict([('schema', {
'fields': [{'name': 'index', 'type': 'number'},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']
}),
('data', [OrderedDict([('index', 1.0), ('values', 1)]),
OrderedDict([('index', 2.0), ('values', 1)])])])
)
assert result == expected
def test_to_json_period_index(self):
idx = pd.period_range('2016', freq='Q-JAN', periods=2)
data = pd.Series(1, idx)
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
fields = [{'freq': 'Q-JAN', 'name': 'index', 'type': 'datetime'},
{'name': 'values', 'type': 'integer'}]
schema = {'fields': fields, 'primaryKey': ['index']}
data = [OrderedDict([('index', '2015-11-01T00:00:00.000Z'),
('values', 1)]),
OrderedDict([('index', '2016-02-01T00:00:00.000Z'),
('values', 1)])]
expected = OrderedDict([('schema', schema), ('data', data)])
assert result == expected
def test_to_json_categorical_index(self):
data = pd.Series(1, pd.CategoricalIndex(['a', 'b']))
result = data.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
expected = (
OrderedDict([('schema',
{'fields': [{'name': 'index', 'type': 'any',
'constraints': {'enum': ['a', 'b']},
'ordered': False},
{'name': 'values', 'type': 'integer'}],
'primaryKey': ['index']}),
('data', [
OrderedDict([('index', 'a'),
('values', 1)]),
OrderedDict([('index', 'b'), ('values', 1)])])])
)
assert result == expected
def test_date_format_raises(self):
with pytest.raises(ValueError):
self.df.to_json(orient='table', date_format='epoch')
# others work
self.df.to_json(orient='table', date_format='iso')
self.df.to_json(orient='table')
def test_make_field_int(self):
data = [1, 2, 3]
kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')]
for kind in kinds:
result = make_field(kind)
expected = {"name": "name", "type": 'integer'}
assert result == expected
def test_make_field_float(self):
data = [1., 2., 3.]
kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')]
for kind in kinds:
result = make_field(kind)
expected = {"name": "name", "type": 'number'}
assert result == expected
def test_make_field_datetime(self):
data = [1., 2., 3.]
kinds = [pd.Series(pd.to_datetime(data), name='values'),
pd.to_datetime(data)]
for kind in kinds:
result = make_field(kind)
expected = {"name": "values", "type": 'datetime'}
assert result == expected
kinds = [pd.Series(pd.to_datetime(data, utc=True), name='values'),
pd.to_datetime(data, utc=True)]
for kind in kinds:
result = make_field(kind)
expected = {"name": "values", "type": 'datetime', "tz": "UTC"}
assert result == expected
arr = pd.period_range('2016', freq='A-DEC', periods=4)
result = make_field(arr)
expected = {"name": "values", "type": 'datetime', "freq": "A-DEC"}
assert result == expected
def test_make_field_categorical(self):
data = ['a', 'b', 'c']
ordereds = [True, False]
for ordered in ordereds:
arr = pd.Series(pd.Categorical(data, ordered=ordered), name='cats')
result = make_field(arr)
expected = {"name": "cats", "type": "any",
"constraints": {"enum": data},
"ordered": ordered}
assert result == expected
arr = pd.CategoricalIndex(data, ordered=ordered, name='cats')
result = make_field(arr)
expected = {"name": "cats", "type": "any",
"constraints": {"enum": data},
"ordered": ordered}
assert result == expected
def test_categorical(self):
s = pd.Series(pd.Categorical(['a', 'b', 'a']))
s.index.name = 'idx'
result = s.to_json(orient='table', date_format='iso')
result = json.loads(result, object_pairs_hook=OrderedDict)
result['schema'].pop('pandas_version')
fields = [{'name': 'idx', 'type': 'integer'},
{'constraints': {'enum': ['a', 'b']},
'name': 'values',
'ordered': False,
'type': 'any'}]
expected = OrderedDict([
('schema', {'fields': fields,
'primaryKey': ['idx']}),
('data', [OrderedDict([('idx', 0), ('values', 'a')]),
OrderedDict([('idx', 1), ('values', 'b')]),
OrderedDict([('idx', 2), ('values', 'a')])])])
assert result == expected
def test_set_default_names_unset(self):
data = pd.Series(1, pd.Index([1]))
result = set_default_names(data)
assert result.index.name == 'index'
def test_set_default_names_set(self):
data = pd.Series(1, pd.Index([1], name='myname'))
result = set_default_names(data)
assert result.index.name == 'myname'
def test_set_default_names_mi_unset(self):
data = pd.Series(
1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')]))
result = set_default_names(data)
assert result.index.names == ['level_0', 'level_1']
def test_set_default_names_mi_set(self):
data = pd.Series(
1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')],
names=['n1', 'n2']))
result = set_default_names(data)
assert result.index.names == ['n1', 'n2']
def test_set_default_names_mi_partion(self):
data = pd.Series(
1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')],
names=['n1', None]))
result = set_default_names(data)
assert result.index.names == ['n1', 'level_1']
def test_timestamp_in_columns(self):
df = pd.DataFrame([[1, 2]], columns=[pd.Timestamp('2016'),
pd.Timedelta(10, unit='s')])
result = df.to_json(orient="table")
js = json.loads(result)
assert js['schema']['fields'][1]['name'] == 1451606400000
assert js['schema']['fields'][2]['name'] == 10000
def test_overlapping_names(self):
cases = [
pd.Series([1], index=pd.Index([1], name='a'), name='a'),
pd.DataFrame({"A": [1]}, index=pd.Index([1], name="A")),
pd.DataFrame({"A": [1]}, index=pd.MultiIndex.from_arrays([
['a'], [1]
], names=["A", "a"])),
]
for data in cases:
with pytest.raises(ValueError) as excinfo:
data.to_json(orient='table')
assert 'Overlapping' in str(excinfo.value)
def test_mi_falsey_name(self):
# GH 16203
df = pd.DataFrame(np.random.randn(4, 4),
index=pd.MultiIndex.from_product([('A', 'B'),
('a', 'b')]))
result = [x['name'] for x in build_table_schema(df)['fields']]
assert result == ['level_0', 'level_1', 0, 1, 2, 3]
| mit |
3manuek/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
mainyanim/eyetoai | findings/distrib.py | 1 | 9083 | import pandas as pd
import numpy as np
import random
import json
from openpyxl import load_workbook
from openpyxl import Workbook
import numpy as np
import math
# define values check and append to arr
# define probability array
# read excel
df = pd.read_excel("output.xlsx")
wb = load_workbook('output.xlsx')
ws = wb.get_sheet_by_name('Sheet1') # Define worksheet
def get_dic_from_two_lists(keys, values):
return {keys[i]: values[i] for i in range(len(keys))}
# Define function to normalize arr values
def normalize(items):
    problist = [x / sum(items) for x in items]
    return problist
# def probslist
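# concatvals: for the spreadsheet row of the current finding, collect the
# comma-separated parameter values stored in columns start..stop, weight each
# value by the probability given in its column header, and draw one value at
# random with those weights.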
def concatvals(row, start, stop):
prob_head = list(df)[start:stop]
width = stop - start
col = start
val_arr = []
prob_arr = []
for i in range(width):
value_temp = df.iloc[row - 2, col]
if isinstance(value_temp, float) is False:
value = [x.strip() for x in value_temp.split(',')]
len_val = len(value)
prob_arr += [prob_head[i] for _ in range(len_val)]
val_arr += value[0:len_val]
col += 1
randparameter = random.choices(val_arr, prob_arr, k=1)
return randparameter
def grab_data(r, s, x, y):
ps = [concatvals(r+s, x, y)]
return ps
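# create_rep: fills dict1 with randomly drawn parameters for the chosen finding.
# The hard-coded offsets added to row_data below encode where each
# (finding, modality) block lives in output.xlsx, so they have to stay in sync
# with that spreadsheet's layout.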
def create_rep(arr, dict1, row_data, condname, modality):
params = []
to_json = []
if condname == 'Mass' and modality == 'Mammography':
for i in range(len(arr)):
params += grab_data(row_data, 0, 14, 19)
row_data += 1
elif condname == 'Calcifications' and modality == 'Mammography':
for i in range(len(arr)):
params += grab_data(row_data, 3, 14, 19)
row_data += 1
elif condname == 'Assymetry' and modality == 'Mammography':
for i in range(len(arr)):
params += grab_data(row_data, 6, 14, 19)
row_data += 1
elif condname == 'Lymph nodes' and modality == 'Mammography':
for i in range(len(arr)):
params += [concatvals(row_data + 7, 14, 19)]
row_data += 1
elif condname == 'Mass' and modality == 'US':
for i in range(len(arr)):
params += [concatvals(row_data + 8, 14, 19)]
row_data += 1
elif condname == 'Calcifications US' and modality == 'US':
for i in range(len(arr)):
params += [concatvals(row_data + 12, 14, 19)]
row_data += 1
elif condname == 'Lymph nodes' and modality == 'US':
for i in range(len(arr)):
params += [concatvals(row_data + 13, 14, 19)]
row_data += 1
elif condname == 'Special cases' and modality == 'US':
for i in range(len(arr)):
params += [concatvals(row_data + 14, 14, 19)]
row_data += 1
elif condname == 'Mass' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 15, 14, 19)]
row_data += 1
elif condname == 'MRI features' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 18, 14, 19)]
row_data += 1
elif condname == 'Kinetic curve assessment' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 19, 14, 19)]
row_data += 1
elif condname == 'Non-mass enhancement (NME)' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 20, 14, 19)]
row_data += 1
elif condname == 'Non-enhancing findings' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 22, 14, 19)]
row_data += 1
elif condname == 'Lymph nodes' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 22, 14, 19)]
row_data += 1
elif condname == 'Fat containing lesions' and modality == 'MRI':
for i in range(len(arr)):
params += [concatvals(row_data + 23, 14, 19)]
row_data += 1
data = get_dic_from_two_lists(arr, params)
dict1.update(data)
to_json += [dict1]
return to_json
def get_name(infile):
with open(infile, 'r') as f:
contents_of_file = f.read()
lines = contents_of_file.splitlines()
line_number = random.randrange(0, len(lines))
person_name = lines[line_number]
return person_name
def get_numcond():
names = len(df.Name.unique())
return names
def get_cond_name():
name_arr = df.Name.unique()
n = list(name_arr)
n_arr = []
for i in range(len(name_arr)):
if (isinstance(n[i], float)) is False:
n_arr += [n[i]]
rand_cond_name = random.choice(n_arr)
return rand_cond_name
def check_row(cond_name):
from xlrd import open_workbook
book = open_workbook("output.xlsx")
for sheet in book.sheets():
for rowidx in range(sheet.nrows):
row = sheet.row(rowidx)
for colidx, cell in enumerate(row):
if cell.value == cond_name:
print("condition name is: ", cond_name)
return rowidx + 1
# Create random with parameter of report numbers
def generate_report(infile):
# for i in range(items):
    # Read the BiRads categories (worksheet row 1) and their probabilities (row 2) into flat arrays
    a = np.array([[i.value for i in j] for j in ws['C1':'I1']]).ravel()
    b = np.array([[i.value for i in j] for j in ws['C2':'I2']]).ravel()
person_name = get_name(infile)
p_id = random.randrange(100)
p_age = random.randrange(25, 65)
br_p = normalize(b)
print(br_p)
br = random.choices(a, br_p, k=1)
name = get_cond_name()
names = get_numcond()
    row = check_row(name)
    # create a list of values and slice empty entities from the list
rm = df['Relevant modalities'].values.tolist()[0:26]
# r = 'Mammography'
r = random.choice(rm)
dict_report = {'Id': p_id, 'First name': person_name, 'Age': p_age, 'Condition Name': name, 'BiRad': br,
'Relevant Modality': r}
# mammo params
if r == 'Mammography':
f_list = df['Relevant findings'].values.tolist()[0:8]
f = random.choice(f_list)
dict_report.update({'Relevant finding': f})
iter_params_mass = ['Shape', 'Margin', 'Density']
iter_params_calc = ['Typically benign', 'Suspicious morphology', 'Distribution']
iter_params_a = ['Assymetry']
iter_params_lymph = ['Lymph nodes']
if f == 'Mass':
report = create_rep(iter_params_mass, dict_report, row, f, r)
elif f == 'Calcifications':
report = create_rep(iter_params_calc, dict_report, row, f, r)
elif f == 'Assymetry':
report = create_rep(iter_params_a, dict_report, row, f, r)
else:
report = create_rep(iter_params_lymph, dict_report, row, f, r)
elif r == 'US':
f_list = df['Relevant findings'].values.tolist()[8:15]
f = random.choice(f_list)
dict_report.update({'Relevant finding': f})
us_params_mass = ['Shape', 'Margin', 'Echo', 'Posterior']
us_params_calc = ['Calcifications']
us_params_l_nodes = ['Lymph Nodes']
us_params_sp_cases = ['Special Cases']
if f == 'Mass':
report = create_rep(us_params_mass, dict_report, row, f, r)
elif f == 'Calcifications US':
report = create_rep(us_params_calc, dict_report, row, f, r)
elif f == 'Lymph nodes':
report = create_rep(us_params_l_nodes, dict_report, row, f, r)
else:
report = create_rep(us_params_sp_cases, dict_report, row, f, r)
elif r == 'MRI':
f_list = df['Relevant findings'].values.tolist()[15:25]
mri_params_mass = ['Shape', 'Margin', 'Internal enhancement']
mri_params_mri_f = ['MRI features']
mri_params_kin_c_a = ['Kinetic curve assessment']
mri_params_nme = ['Distribution', 'Internal enhancement patterns']
mri_params_nef = ['Non-enhancing patterns']
mri_params_l_nodes = ['Lymph Nodes']
mri_params_fcl = ['Fat containing lesions']
f = random.choice(f_list)
dict_report.update({'Relevant finding': f})
if f == 'Mass':
report = create_rep(mri_params_mass, dict_report, row, f, r)
elif f == 'MRI features':
report = create_rep(mri_params_mri_f, dict_report, row, f, r)
elif f == 'Kinetic curve assessment':
report = create_rep(mri_params_kin_c_a, dict_report, row, f, r)
elif f == 'Non-mass enhancement (NME)':
report = create_rep(mri_params_nme, dict_report, row, f, r)
elif f == 'Non-enhancing findings':
report = create_rep(mri_params_nef, dict_report, row, f, r)
elif f == 'Lymph nodes':
report = create_rep(mri_params_l_nodes, dict_report, row, f, r)
else:
report = create_rep(mri_params_fcl, dict_report, row, f, r)
print(report)
def main():
for i in range(1):
generate_report("first-names.txt")
main()
| mit |
luispedro/BuildingMachineLearningSystemsWithPython | ch01/gen_webstats.py | 1 | 1108 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script generates web traffic data for our hypothetical
# web startup "MLASS" in chapter 01
import os
import scipy as sp
from scipy.stats import gamma
import matplotlib.pyplot as plt
from utils import DATA_DIR, CHART_DIR
sp.random.seed(3) # to reproduce the data later on
x = sp.arange(1, 31*24)
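# one sample per hour over the month: a weekly sinusoid, gamma-distributed noise,
# and a slowly growing exponential trend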
y = sp.array(200*(sp.sin(2*sp.pi*x/(7*24))), dtype=int)
y += gamma.rvs(15, loc=0, scale=100, size=len(x))
y += 2 * sp.exp(x/100.0)
y = sp.ma.array(y, mask=[y<0])
print(sum(y), sum(y<0))
plt.scatter(x, y)
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hits/hour")
plt.xticks([w*7*24 for w in range(5)],
['week %i' %(w+1) for w in range(5)])
plt.autoscale(tight=True)
plt.grid()
plt.savefig(os.path.join(CHART_DIR, "1400_01_01.png"))
sp.savetxt(os.path.join(DATA_DIR, "web_traffic.tsv"),
list(zip(x, y)), delimiter="\t", fmt="%s")
| mit |
jlegendary/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
scizen9/kpy | flask/stats_web.py | 1 | 20311 | ''' Modified from online verion of:
_README: https://github.com/bokeh/bokeh/blob/master/examples/app/stocks/README.md
.. note::
Running this example requires having the "stats.log" file.
Use the ``bokeh serve`` command to run the example by executing:
bokeh serve stocks
at your command prompt. Then navigate to the URL
http://localhost:5006/stocks
..
'''
try:
from functools import lru_cache
except ImportError:
    # Python 2's stdlib does not have lru_cache, so let's just
    # create a dummy decorator to avoid crashing
print ("WARNING: Cache for this example is available on Python 3 only.")
def lru_cache():
def dec(f):
def _(*args, **kws):
return f(*args, **kws)
return _
return dec
from os.path import dirname, join
import pandas as pd
import datetime
import numpy as np
import os
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import ColumnDataSource, Label, CDSView, GroupFilter, Range1d, LinearAxis
from bokeh.models import HoverTool
from bokeh.models.annotations import BoxAnnotation
from bokeh.models.widgets import PreText, Select
from bokeh.plotting import figure
from bokeh.core.properties import value
from bokeh.palettes import Paired
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz, get_sun, get_moon
import astropy.units as u
#from datetime import datetime, timedelta
import datetime
import model
@lru_cache()
def load_p48seeing(obsdate):
time, seeing = model.get_p18obsdata(obsdate)
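    # shift the UTC timestamps returned by the model to local time by adding
    # the current UTC-to-local offset expressed as a day fraction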
day_frac_diff = datetime.timedelta(np.ceil((datetime.datetime.now() - datetime.datetime.utcnow() ).total_seconds())/3600/24)
local_date = np.array(time) + day_frac_diff
d = pd.DataFrame({'date':local_date, 'seeing':seeing})
return d
@lru_cache()
def load_stats(statsfile='stats.log'):
data = pd.read_csv(statsfile, header=None,
names=['path', 'obj', 'jd', 'ns', 'fwhm', 'ellipticity', 'bkg', 'airmass', 'in_temp', 'imtype', 'out_temp', 'in_hum'])
jds = data['jd']
t = Time(jds, format='jd', scale='utc')
date = t.utc.datetime
day_frac_diff = datetime.timedelta(np.ceil((datetime.datetime.now() - datetime.datetime.utcnow() ).total_seconds())/3600/24)
local_date = date + day_frac_diff
data2 = data.assign(localdate=local_date)
data2.set_index('localdate')
return pd.DataFrame({'date':data2['localdate'], 'ns':data2['ns'], 'fwhm':data2['fwhm'], 'ellipticity':data2['ellipticity'], \
'bkg':data2['bkg'], 'airmass':data2['airmass'], 'in_temp':data2['in_temp'], 'imtype':data2['imtype'],\
'out_temp':data2['out_temp'], 'in_hum':data2['in_hum']})
@lru_cache()
def plot_stats(statsfile, mydate):
source = ColumnDataSource(data=dict(date=[], ns=[], fwhm=[], ellipticity=[], bkg=[], airmass=[], in_temp=[], imtype=[], out_temp=[], in_hum=[]))
source_static = ColumnDataSource(data=dict(date=[], ns=[], fwhm=[], ellipticity=[], bkg=[], airmass=[], in_temp=[], imtype=[], out_temp=[], in_hum=[]))
viewScience = CDSView(source=source, filters=[GroupFilter(column_name='imtype', group='SCIENCE')])
viewAcquisition = CDSView(source=source, filters=[GroupFilter(column_name='imtype', group='ACQUISITION')])
viewGuider = CDSView(source=source, filters=[GroupFilter(column_name='imtype', group='GUIDER')])
viewFocus = CDSView(source=source, filters=[GroupFilter(column_name='imtype', group='FOCUS')])
source_p48 = ColumnDataSource(data=dict(date=[], seeing=[]))
def update(selected=None):
if statsfile:
data = load_stats(statsfile)
source.data = source.from_df(data[['date', 'ns', 'fwhm', 'ellipticity', 'bkg', 'airmass', 'in_temp', 'imtype', 'out_temp', 'in_hum']])
source_static.data = source.data
p48 = load_p48seeing(mydate)
source_p48.data = source_p48.from_df(p48[['date', 'seeing']])
source_static_p48.data = source_p48.data
source_static_p48 = ColumnDataSource(data=dict(date=[], seeing=[]))
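    # note: update() above refers to source_static_p48, which is only defined here;
    # that is fine because the name is resolved when update() is called below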
tools = 'pan,box_zoom,reset'
p48seeing = figure(plot_width=425, plot_height=250, tools=tools, x_axis_type='datetime', active_drag="box_zoom")
p48seeing.circle('date', 'seeing', source=source_static_p48, color="black")
p48seeing.title.text = "P18 seeing [arcsec]"
if statsfile:
ns = figure(plot_width=425, plot_height=250, tools=tools, x_axis_type='datetime', active_drag="box_zoom")
ns.line('date', 'ns', source=source_static)
ns.circle('date', 'ns', size=1, source=source, color=None, selection_color="orange")
ns.title.text = "Number of bright sources extracted"
bkg = figure(plot_width=425, plot_height=250, tools=tools, x_axis_type='datetime', active_drag="box_zoom")
bkg.x_range = ns.x_range
bkg.line('date', 'bkg', source=source_static)
bkg.circle('date', 'bkg', size=1, source=source, color=None, selection_color="orange")
bkg.title.text = "Background (counts)"
temp = figure(plot_width=425, plot_height=250, tools=tools, x_axis_type='datetime', active_drag="box_zoom")
temp.x_range = ns.x_range
temp.line('date', 'in_temp', source=source_static, color='blue', legend="Inside")
temp.line('date', 'out_temp', source=source_static, color='green', legend="Outside")
temp.circle('date', 'in_temp', size=1, source=source, color=None, selection_color="orange")
temp.title.text = "Temperature [C]"
fwhm = figure(plot_width=425, plot_height=250, tools=tools, x_axis_type='datetime', active_drag="box_zoom")
fwhm.x_range = ns.x_range
fwhm.circle('date', 'fwhm', source=source_static, color="green", legend="Focus", view=viewFocus)
fwhm.circle('date', 'fwhm', source=source_static, color="red", legend="Science", view=viewScience)
fwhm.circle('date', 'fwhm', source=source_static, color="blue", legend="Acquisition", view=viewAcquisition)
fwhm.circle('date', 'fwhm', source=source_static, color="black", legend="Guider", view=viewGuider)
fwhm.circle('date', 'fwhm', size=1, source=source, color=None, selection_color="orange")
fwhm.title.text = "P60 FWHM [arcsec]"
airmass = figure(plot_width=425, plot_height=250, tools=tools, x_axis_type='datetime', active_drag="box_zoom")
airmass.x_range = ns.x_range
airmass.line('date', 'airmass', source=source_static)
airmass.circle('date', 'airmass', size=1, source=source, color=None, selection_color="orange")
airmass.title.text = "Airmass"
ellipticity = figure(plot_width=425, plot_height=250, tools=tools, x_axis_type='datetime', active_drag="box_zoom")
ellipticity.x_range = ns.x_range
ellipticity.line('date', 'ellipticity', source=source_static)
ellipticity.circle('date', 'ellipticity', size=1, source=source, color=None, selection_color="orange")
ellipticity.title.text = "Ellipticity"
humidity = figure(plot_width=425, plot_height=250, tools=tools, x_axis_type='datetime', active_drag="box_zoom")
humidity.x_range = ns.x_range
humidity.line('date', 'in_hum', source=source_static)
humidity.circle('date', 'in_hum', size=1, source=source, color=None, selection_color="orange")
humidity.title.text = "Inside Humidity [%]"
p48seeing.x_range = ns.x_range
left = column(fwhm, p48seeing, airmass)
center = column(ellipticity, ns, bkg, )
right = column(temp, humidity)
layout = row(left, center, right)
else:
layout = row(column(p48seeing))
# initialize
update()
curdoc().add_root(layout)
curdoc().title = "Stats"
return layout
@lru_cache()
def plot_not_found_message(day):
not_found = figure(plot_width=900, plot_height=450, x_range=[0, 900], y_range=[0, 450])
not_found.image(image=[np.zeros([900, 450])+0.1], x=0, y=0, dw=900, dh=450)
citation = Label(x=50, y=225, x_units='screen', y_units='screen', text='No statistics found for today \n (likely we were weathered out...)')
not_found.add_layout(citation)
not_found.title.text = "Statistics not found for day %s"%(day)
layout = column(not_found)
curdoc().add_root(layout)
curdoc().title = "Stats not found"
@lru_cache()
def plot_stats_allocation(data):
"""
Plots in the shape of bars the time available and spent for each active allocation.
"""
#Create the first plot with the allocation hours
alloc_names = data['allocations']
categories = ["spent_hours", "free_hours"]
colors = [ "#e84d60", "darkgreen"] #"#c9d9d3"
N = len(alloc_names)
source = ColumnDataSource(data=data)
p = figure(x_range=alloc_names, plot_height=420, plot_width=80*8, title="Time spent/available for SEDM allocations this term",
toolbar_location=None, tools="")
p.vbar_stack(categories, x='allocations', width=0.9, color=colors, source=source, legend=["Spent", "Available"])
p.y_range.start = 0
p.x_range.range_padding = 0.1
p.xgrid.grid_line_color = None
p.axis.minor_tick_line_color = None
p.outline_line_color = None
p.legend.location = "top_right"
p.legend.orientation = "horizontal"
p.yaxis.axis_label = 'Hours'
p.xaxis.major_label_orientation = 0.3
#Create the second plot with the % spent
alloc_names = data['allocations']
percentage = (data["spent_hours"] / data["alloc_hours"]) * 100
colors=N*['#084594']
    # Optional per-bar coloring by percentage spent, kept for reference:
    # for i, p in enumerate(percentage):
    #     if p < 50: colors[i] = '#22A784'
    #     elif p > 50 and p < 75: colors[i] = '#FD9F6C'
    #     else: colors[i] = '#DD4968'
source = ColumnDataSource(data=dict(alloc_names=alloc_names, percentage=percentage, color=colors))
p2 = figure(x_range=alloc_names, y_range=(0,100), plot_height=420, plot_width=80*8, title="Percentage of time spent",
toolbar_location=None, tools="")
p2.vbar(x='alloc_names', top='percentage', width=0.9, color='color', source=source)
p2.xgrid.grid_line_color = None
p2.legend.orientation = "horizontal"
p2.legend.location = "top_center"
p2.yaxis.axis_label = '% time spent'
p2.xaxis.major_label_orientation = 0.3
#Create the pie charts
pieColors = 10*["red", "green", "blue", "orange", "yellow", 'lime', 'brown', 'cyan', \
'magenta', 'olive', 'black', 'teal', 'gold', 'crimson', 'moccasin', 'greenyellow', 'navy', 'ivory', 'lightpink']
#First one with the time spent
# define starts/ends for wedges from percentages of a circle
percents_only = np.round( np.array(list(data["spent_hours"] / np.sum(data["spent_hours"])))*100, 1)
percents = np.cumsum( [0] + list(data["spent_hours"] / np.sum(data["spent_hours"])))
starts = [per*2*np.pi for per in percents[:-1]]
ends = [per*2*np.pi for per in percents[1:]]
p3 = figure(x_range=(-1, 2.5), y_range=(-1.1, 1.1), plot_height=420, plot_width=600, title="% spent")
#Add individual wedges:
for i in range(N):
p3.wedge(x=0, y=0, radius=.9, start_angle=starts[i], end_angle=ends[i], color=pieColors[i], legend="[{0}%] {1}".format(percents_only[i], alloc_names[i]) )
p3.xgrid.grid_line_color = None
p3.ygrid.grid_line_color = None
p3.legend.orientation = "vertical"
p3.legend.location = "top_right"
p3.legend.border_line_alpha = 0
p3.legend.background_fill_color = None
p3.xaxis.visible = False
p3.yaxis.visible = False
#Second one with the time allocated
# define starts/ends for wedges from percentages of a circle
percents_only = np.round( np.array(list(data["alloc_hours"] / np.sum(data["alloc_hours"])))*100, 1)
percents = np.cumsum( [0] + list(data["alloc_hours"] / np.sum(data["alloc_hours"])))
starts = [per*2*np.pi for per in percents[:-1]]
ends = [per*2*np.pi for per in percents[1:]]
p4 = figure(x_range=(-1, 2.5), y_range=(-1.1, 1.1), plot_height=420, plot_width=600, title="% time allocated to each program")
#Add individual wedges:
for i in range(N):
p4.wedge(x=0, y=0, radius=.9, start_angle=starts[i], end_angle=ends[i], color=pieColors[i], legend="[{0}%] {1}".format(percents_only[i], alloc_names[i]) )
p4.xgrid.grid_line_color = None
p4.ygrid.grid_line_color = None
p4.legend.orientation = "vertical"
p4.legend.location = "top_right"
p4.legend.border_line_alpha = 0
p4.legend.background_fill_color = None
p4.xaxis.visible = False
p4.yaxis.visible = False
layout = row(column(p, p2), column(p4, p3))
curdoc().add_root(layout)
curdoc().title = "Allocation stats"
return layout
def plot_visibility(ras, decs, names, allocs=[None], priorities=[5], endobs=[None],
exptime=2430, date=None, allowed_allocs=[None]):
''' makes a visibility plot for one or many objects, highlighting observed patches if relevant
all these arguments are lists/arrays/iterables, even if they are of size 1
priorities: integers
obsd: list/array of observed objects, should match 'names'
    endobs: 'YYYY-MM-DDTHH:MM:SS.ssssssssss' (as output from the sql query) of the time the observation ended
exptime: in seconds
date: YYYYMMDD, conveniently matching the folder names, of midnight
allowed_allocs: list of string allocation names visible to a user'''
allocpalette = Paired[12][1::2] + Paired[12][::2]
priorities = np.array(priorities, dtype=np.int)
allocs = np.asarray(allocs)
names = np.asarray(names)
allowed_allocs = np.asarray(allowed_allocs)
allocs[~np.isin(allocs, allowed_allocs)] = 'other'
p = figure(plot_width=700, plot_height=500, toolbar_location='above',
y_range=(0, 90), y_axis_location="right")
### setup with axes, sun/moon, frames, background
palomar_mountain = EarthLocation(lon=243.1361*u.deg, lat=33.3558*u.deg, height=1712*u.m)
utcoffset = -7 * u.hour # Pacific Daylight Time
if date == None:
time = (Time.now() - utcoffset).datetime # date is based on local time
time = Time(datetime.datetime(time.year, time.month, time.day))
else:
time = Time(datetime.datetime(int(date[:4]), int(date[4:6]), int(date[6:8])))
midnight = time - utcoffset # 7am local time of correct date, midnight UTC
if endobs[0] != None:
endobs = Time(np.array(endobs, dtype='|S32'), format='isot')
endobs.format = u'datetime'
delta_midnight = np.linspace(-8, 8, 500) * u.hour
t = midnight + delta_midnight
abstimes = [i.datetime.strftime('%I:%M %p') for i in t + utcoffset]
frame = AltAz(obstime=t, location=palomar_mountain)
sun_alt = get_sun(t).transform_to(frame).alt
moon_alt = get_moon(t).transform_to(frame).alt
# shading for nighttime and twilight
dark_times = delta_midnight[sun_alt < 0].value
twilit_times = delta_midnight[sun_alt < -18 * u.deg].value
plotted_times = delta_midnight[sun_alt < 5 * u.deg].value
twilight = BoxAnnotation(left=min(twilit_times), right=max(twilit_times), bottom=0,
fill_alpha=0.15, fill_color='black', level='underlay')
night = BoxAnnotation(left=min(dark_times), right=max(dark_times), bottom=0,
fill_alpha=0.25, fill_color='black', level='underlay')
earth = BoxAnnotation(top=0, fill_alpha=0.8, fill_color='sienna')
p.add_layout(night)
p.add_layout(twilight)
p.add_layout(earth)
# sun and moon
sun = p.line(delta_midnight, sun_alt, line_color='red', name="Sun", legend='Sun', line_dash='dashed')
moon = p.line(delta_midnight, moon_alt, line_color='yellow', line_dash='dashed',
name="Moon", legend='Moon')
# labels and axes
p.title.text = "Visibility for %s UTC" %midnight
p.xaxis.axis_label = "Hours from PDT Midnight"
p.x_range.start = min(plotted_times)
p.x_range.end = max(plotted_times)
p.yaxis.axis_label = "Airmass"
# primary airmass label on right
airmasses = (1.01, 1.1, 1.25, 1.5, 2., 3., 6.)
ticker = [90 - np.arccos(1./i) * 180/np.pi for i in airmasses]
p.yaxis.ticker = ticker
p.yaxis.major_label_overrides = {tick: str(airmasses[i]) for i, tick in enumerate(ticker)}
# add supplementary alt label on left
p.extra_y_ranges = {"altitude": Range1d(0, 90)}
p.add_layout(LinearAxis(y_range_name="altitude", axis_label='Altitude [deg]'), 'left')
### adding data from the actual objects
#objs = SkyCoord(np.array(ras, dtype=np.float),
# np.array(decs, dtype=np.float), unit="deg")
approx_midnight = int(Time.now().jd - .5) + .5 - utcoffset.value/24.
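    # the constants below are sin/cos of Palomar's latitude and its longitude in
    # degrees; together with approx_midnight (the Julian date of local midnight)
    # they let us compute target altitudes directly instead of building an AltAz
    # frame for every object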
palo_sin_lat = 0.549836545
palo_cos_lat = 0.835272275
palo_long = 243.1362
ras = np.array(ras, dtype=np.float)
decs = np.array(decs, dtype=np.float)
alloc_color = {}
for i, val in enumerate(np.unique(allocs)):
if val in allowed_allocs:
alloc_color[val] = allocpalette[i % len(allocpalette)]
else:
alloc_color[val] = 'lightgray'
tooltipped = [] # things with tooltips
tooltips = [('obj', '@name'), # make it #name when we get to bokeh 0.13
('time', '@abstime'),
('altitude', u"@alt\N{DEGREE SIGN}"),
('airmass', '@airmass'),
('priority', '@priority'),
('allocation', '@alloc')]
for i in np.array(allocs).argsort(): # go in order by alloc for an alphabetized legend
color = alloc_color[allocs[i]]
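        # altitude from the standard relation
        # sin(alt) = cos(lat)*cos(HA)*cos(dec) + sin(lat)*sin(dec),
        # where the hour angle HA comes from an approximate GMST
        # (18.697374558 + 24.06570982*D hours, D = JD - 2451545) plus Palomar's longitude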
alt = 180 / np.pi * np.arcsin(palo_cos_lat * \
np.cos(np.pi/180 * (palo_long - ras[i] + 15 * (18.697374558 + 24.06570982 * (delta_midnight.value/24. + approx_midnight - 2451545)))) * \
np.cos(decs[i] * np.pi/180) + palo_sin_lat * np.sin(decs[i] * np.pi/180))
airmass = 1./np.cos((90 - alt) * np.pi/180)
source = ColumnDataSource( dict(times=delta_midnight,
alt=alt,
airmass=airmass,
abstime=abstimes,
priority=np.full(len(abstimes), priorities[i]),
alloc=np.full(len(abstimes), allocs[i]),
name=np.full(len(abstimes), names[i]))) # delete the name when we get to bokeh 0.13
if allocs[i] == None: # single object
legend = names[i]
tooltips = tooltips[:4]
else:
legend = '{}'.format(allocs[i])
if endobs[0] != None: # plot that highlights observed part of the night
# full path of the night
dotted = p.line('times', 'alt', color=color, source=source, line_dash='2 2',
name=names[i], line_width=1, legend=legend)
# manually crop the source so only thick observed part has tooltips
endtime = endobs[i]
initime = endtime - exptime * u.second
if i > 0:
initime = max(initime, endobs[i - 1])
mask = np.logical_and(delta_midnight + midnight + utcoffset > initime,
delta_midnight + midnight + utcoffset < endtime)
source = ColumnDataSource(pd.DataFrame(source.data)[mask])
priorities[i] += 3 # all it changes is the line width
line = p.line('times', 'alt', color=color, source=source, name=''.format(names[i]),
line_width=priorities[i], legend=legend)
if allocs[i] in allowed_allocs:
tooltipped.append(line)
p.legend.click_policy = 'hide'
p.legend.location = 'bottom_right'
p.add_tools(HoverTool(renderers=tooltipped, tooltips=tooltips))
curdoc().add_root(p)
curdoc().title = 'Visibility plot'
return p
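# Minimal usage sketch (hypothetical file name and date, assuming a stats.log
# written by the SEDM pipeline and a Bokeh server hosting the document):
#     layout = plot_stats('stats.log', '20180601')
#     p = plot_visibility(ras=[150.1], decs=[2.2], names=['example_target'],
#                         date='20180601')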
| gpl-2.0 |
pap/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtk.py | 69 | 43991 | from __future__ import division
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import matplotlib
from matplotlib import verbose
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import cbook
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
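# True when the widget is both VISIBLE and MAPPED, i.e. it can actually be drawn to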
def GTK_WIDGET_DRAWABLE(w):
flags = w.flags();
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def show(mainloop=True):
"""
Show all the figures and enter the gtk main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if mainloop and gtk.main_level() == 0 and \
len(Gcf.get_all_fig_managers())>0:
gtk.main()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTK(thisFig)
manager = FigureManagerGTK(canvas, num)
# equals:
#manager = FigureManagerGTK(FigureCanvasGTK(Figure(*args, **kwargs), num)
return manager
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self._idle_event_id = gobject.idle_add(self.idle_event)
def destroy(self):
#gtk.DrawingArea.destroy(self)
gobject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "hit", key
FigureCanvasBase.key_press_event(self, key)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "release", key
FigureCanvasBase.key_release_event(self, key)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval <256:
key = chr(event.keyval)
else:
key = None
ctrl = event.state & gdk.CONTROL_MASK
shift = event.state & gdk.SHIFT_MASK
return key
def configure_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
def idle_draw(*args):
self.draw()
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format):
if self.flags() & gtk.REALIZED == 0:
# for self.window(for pixmap) and has a side effect of altering
# figure width,height (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
if is_string_like(filename):
try:
pixbuf.save(filename, format)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def get_default_filetype(self):
return 'png'
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.window.set_title("Figure %d" % num)
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
self.toolbar.destroy()
self.__dict__.clear()
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image_file, callback(str)
toolitems = (
('Home', 'Reset original view', 'home.png', 'home'),
('Back', 'Back to previous view','back.png', 'back'),
('Forward', 'Forward to next view','forward.png', 'forward'),
('Pan', 'Pan axes with left mouse, zoom with right', 'move.png','pan'),
('Zoom', 'Zoom to rectangle','zoom_to_rect.png', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots','subplots.png', 'configure_subplots'),
('Save', 'Save the figure','filesave.png', 'save_figure'),
)
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self._idle_draw_id = 0
def set_message(self, s):
if self._idle_draw_id == 0:
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._imageBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
try: lastrect, imageBack = self._imageBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._imageBack = axrect, drawable.get_image(*axrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
else:
def idle_draw(*args):
drawable.draw_image(gc, imageBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._init_toolbar2_4()
else:
self._init_toolbar2_2()
def _init_toolbar2_2(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.append_space()
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
w = self.append_item(text,
tooltip_text,
'Private',
image,
getattr(self, callback)
)
self.append_space()
self.message = gtk.Label()
self.append_widget(self.message, None, None)
self.message.show()
def _init_toolbar2_4(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win,)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class NavigationToolbar(gtk.Toolbar):
"""
Public attributes
canvas - the FigureCanvas (gtk.DrawingArea)
win - the gtk.Window
"""
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image, callback(str), callback_arg, scroll(bool)
toolitems = (
('Left', 'Pan left with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_BACK, 'panx', -1, True),
('Right', 'Pan right with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_FORWARD, 'panx', 1, True),
('Zoom In X',
'Zoom In X (shrink the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomx', 1, True),
('Zoom Out X',
'Zoom Out X (expand the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomx', -1, True),
(None, None, None, None, None, None,),
('Up', 'Pan up with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_UP, 'pany', 1, True),
('Down', 'Pan down with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_DOWN, 'pany', -1, True),
('Zoom In Y',
'Zoom in Y (shrink the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomy', 1, True),
('Zoom Out Y',
'Zoom Out Y (expand the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomy', -1, True),
(None, None, None, None, None, None,),
('Save', 'Save the figure',
gtk.STOCK_SAVE, 'save_figure', None, False),
)
def __init__(self, canvas, window):
"""
figManager is the FigureManagerGTK instance that contains the
toolbar, with attributes figure, window and drawingArea
"""
gtk.Toolbar.__init__(self)
self.canvas = canvas
# Note: gtk.Toolbar already has a 'window' attribute
self.win = window
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._create_toolitems_2_4()
self.update = self._update_2_4
self.fileselect = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
self._create_toolitems_2_2()
self.update = self._update_2_2
self.fileselect = FileSelection(title='Save the figure',
parent=self.win)
self.show_all()
self.update()
def _create_toolitems_2_4(self):
# use the GTK+ 2.4 GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
if callback_arg:
tbutton.connect('clicked', getattr(self, callback),
callback_arg)
else:
tbutton.connect('clicked', getattr(self, callback))
if scroll:
tbutton.connect('scroll_event', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
# Axes toolitem, is empty at start, update() adds a menu if >=2 axes
self.axes_toolitem = gtk.ToolItem()
self.insert(self.axes_toolitem, 0)
self.axes_toolitem.set_tooltip (
self.tooltips,
tip_text='Select axes that controls affect',
tip_private = 'Private')
align = gtk.Alignment (xalign=0.5, yalign=0.5, xscale=0.0, yscale=0.0)
self.axes_toolitem.add(align)
self.menubutton = gtk.Button ("Axes")
align.add (self.menubutton)
def position_menu (menu):
"""Function for positioning a popup menu.
Place menu below the menu button, but ensure it does not go off
the bottom of the screen.
            The default is to pop the menu up at the current mouse position
"""
x0, y0 = self.window.get_origin()
x1, y1, m = self.window.get_pointer()
x2, y2 = self.menubutton.get_pointer()
sc_h = self.get_screen().get_height() # requires GTK+ 2.2 +
w, h = menu.size_request()
x = x0 + x1 - x2
y = y0 + y1 - y2 + self.menubutton.allocation.height
y = min(y, sc_h - h)
return x, y, True
def button_clicked (button, data=None):
self.axismenu.popup (None, None, position_menu, 0,
gtk.get_current_event_time())
self.menubutton.connect ("clicked", button_clicked)
def _update_2_4(self):
# for GTK+ 2.4+
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
self.axismenu = self._make_axis_menu()
self.menubutton.show_all()
else:
self.menubutton.hide()
self.set_active(range(len(self._axes)))
def _create_toolitems_2_2(self):
# use the GTK+ 2.2 (and lower) GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.append_space()
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
item = self.append_item(text, tooltip_text, 'Private', image,
getattr(self, callback), callback_arg)
if scroll:
item.connect("scroll_event", getattr(self, callback))
self.omenu = gtk.OptionMenu()
self.omenu.set_border_width(3)
self.insert_widget(
self.omenu,
'Select axes that controls affect',
'Private', 0)
def _update_2_2(self):
# for GTK+ 2.2 and lower
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
# set up the axis menu
self.omenu.set_menu( self._make_axis_menu() )
self.omenu.show_all()
else:
self.omenu.hide()
self.set_active(range(len(self._axes)))
def _make_axis_menu(self):
# called by self._update*()
def toggled(item, data=None):
if item == self.itemAll:
for item in items: item.set_active(True)
elif item == self.itemInvert:
for item in items:
item.set_active(not item.get_active())
ind = [i for i,item in enumerate(items) if item.get_active()]
self.set_active(ind)
menu = gtk.Menu()
self.itemAll = gtk.MenuItem("All")
menu.append(self.itemAll)
self.itemAll.connect("activate", toggled)
self.itemInvert = gtk.MenuItem("Invert")
menu.append(self.itemInvert)
self.itemInvert.connect("activate", toggled)
items = []
for i in range(len(self._axes)):
item = gtk.CheckMenuItem("Axis %d" % (i+1))
menu.append(item)
item.connect("toggled", toggled)
item.set_active(True)
items.append(item)
menu.show_all()
return menu
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, button, direction):
'panx in direction'
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
return True
def pany(self, button, direction):
'pany in direction'
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
return True
def zoomx(self, button, direction):
'zoomx in direction'
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
return True
def zoomy(self, button, direction):
'zoomy in direction'
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
return True
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
if gtk.pygtk_version >= (2,4,0):
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = filetypes.items()
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
self.hide()
return filename, self.ext
else:
class FileSelection(gtk.FileSelection):
"""GTK+ 2.2 and lower file selector which remembers the last
file/directory selected
"""
def __init__(self, path=None, title='Select a file', parent=None):
super(FileSelection, self).__init__(title)
if path: self.path = path
else: self.path = os.getcwd() + os.sep
if parent: self.set_transient_for(parent)
def get_filename_from_user(self, path=None, title=None):
if path: self.path = path
if title: self.set_title(title)
self.set_filename(self.path)
filename = None
if self.run() == int(gtk.RESPONSE_OK):
self.path = filename = self.get_filename()
self.hide()
ext = None
if filename is not None:
ext = os.path.splitext(filename)[1]
if ext.startswith('.'):
ext = ext[1:]
return filename, ext
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in lines.Line2D.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
self._updateson = True # suppress updates when setting widgets manually
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
        'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
        'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
        'called when the marker-face color button is clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0):
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
gtk.window_set_default_icon_from_file (
os.path.join (matplotlib.rcParams['datapath'], 'images', icon_filename))
except:
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureManager = FigureManagerGTK
| agpl-3.0 |
JT5D/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
        # fetching with transpose_data=False returns the array without the default transpose
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
mfe5003/rydtip | Rb87BlockadeSimple.py | 1 | 4940 |
# coding: utf-8
# In[1]:
import scipy.constants as consts
from lib.AtomNumbers import QD, Rb87, State, TransitionFrequency
import lib.DipoleDipoleInteractions as ddi
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from sympy import *
import datetime
get_ipython().magic(u'matplotlib inline')
# In[2]:
states = []
n_min = 50
n_max = 150
l_max = 1 # stop at f
sI = State(97,2,2.5,2.5)
mj_total = 2*sI.mj
for n in range(n_min,n_max):
for l in [sI.l-1,sI.l+1]:
        for j in np.arange(abs(l-0.5),l+1): # l+0.5 doesn't register the second j value
for mj in np.arange(-j,j+0.5):
s=State(n,l,j,mj)
states.append([s, TransitionFrequency(Rb87,sI,s)])
print(len(states))
# ### calculate Förster defects
# In[3]:
EnergyCut = 1e9 # only consider states within 1 GHz
molecular_states = []
for s1 in states:
for s2 in states:
# molecular dissociation energy limit relative to the initial state
molecular_energy = s1[1][0]+s2[1][0]
if abs(molecular_energy) <= EnergyCut:
if s1[0].mj + s2[0].mj == mj_total:
molecular_states.append([(s1[0],s2[0]), molecular_energy])
print(len(molecular_states))
# ### Build the Hamiltonian
# In[4]:
dimension = len(molecular_states)+1 # add on the initial state
Hcoupling = np.zeros((dimension,dimension))
energies = np.zeros(dimension)
for i in xrange(dimension-1):
ms = molecular_states[i]
temp = Rb87.c3(sI,sI,ms[0][0],ms[0][1])[0]
Hcoupling[i][-1] = temp
Hcoupling[-1][i] = temp
energies[i] = ms[1]*1e-9 # in GHz
# #### verify at large R (100 um)
# In[5]:
ddi.getRelevantCouplings(Hcoupling, energies, 100, 0.001)
# #### Calculate blockade curves
# In[6]:
data = []
# In[7]:
r_start = 3
r_stop = 15
samples = 500
r_last = 0
for r in np.linspace(r_start,r_stop,samples):
current_time = datetime.datetime.now().time()
if int(r) > int(r_last):
print(r)
print(current_time.isoformat())
r_last = r
data = data + ddi.getRelevantCouplings(Hcoupling, energies, r, 0.01, 1.0)
# In[8]:
mark_r = [0.20202, 0.40404, 0.606061, 0.808081, 1.0101, 1.21212, 1.41414, 1.61616, 1.81818, 2.0202, 2.22222, 2.42424, 2.62626, 2.82828, 3.0303, 3.23232, 3.43434, 3.63636, 3.83838, 4.0404, 4.24242, 4.44444, 4.64646, 4.84848, 5.05051, 5.25253, 5.45455, 5.65657, 5.85859, 6.06061, 6.26263, 6.46465, 6.66667, 6.86869, 7.07071, 7.27273, 7.47475, 7.67677, 7.87879, 8.08081, 8.28283, 8.48485, 8.68687, 8.88889, 9.09091, 9.29293, 9.49495, 9.69697, 9.89899, 10.101, 10.303, 10.5051, 10.7071, 10.9091, 11.1111, 11.3131, 11.5152, 11.7172, 11.9192, 12.1212, 12.3232, 12.5253, 12.7273, 12.9293, 13.1313, 13.3333, 13.5354, 13.7374, 13.9394, 14.1414, 14.3434, 14.5455, 14.7475, 14.9495, 15.1515, 15.3535, 15.5556, 15.7576, 15.9596, 16.1616, 16.3636, 16.5657, 16.7677, 16.9697, 17.1717, 17.3737, 17.5758, 17.7778, 17.9798, 18.1818, 18.3838, 18.5859, 18.7879, 18.9899, 19.1919, 19.3939, 19.596, 19.798, 20.]
mark_B = [-10966.3, -1370.79, -406.16, -171.349, -87.7303, -50.7695, -31.971, -21.4175, -15.0414, -10.9642, -8.23635, -6.34261, -4.98687, -3.99069, -3.24218, -2.66872, -2.22181, -1.86818, -1.58454, -1.35422, -1.16507, -1.00814, -0.876692, -0.76563, -0.671032, -0.589863, -0.51975, -0.458827, -0.405612, -0.358925, -0.317818, -0.281522, -0.24941, -0.220963, -0.195746, -0.17339, -0.153577, -0.136026, -0.120493, -0.106757, -0.0946198, -0.0839036, -0.074448, -0.0661083, -0.0587547, -0.0522711, -0.0465538, -0.0415106, -0.0370599, -0.0331295, -0.0296559, -0.0265832, -0.0238624, -0.0214505, -0.01931, -0.017408, -0.0157157, -0.0142081, -0.0128632, -0.0116618, -0.0105871, -0.00962452, -0.00876107, -0.00798552, -0.00728798, -0.00665975, -0.00609318, -0.00558157, -0.00511896, -0.00470014, -0.00432048, -0.0039759, -0.00366277, -0.00337787, -0.00311837, -0.00288173, -0.00266568, -0.00246822, -0.00228756, -0.00212208, -0.00197036, -0.0018311, -0.00170316, -0.0015855, -0.0014772, -0.00137741, -0.00128538, -0.00120043, -0.00112194, -0.00104937, -0.000982209, -0.00092, -0.000862334, -0.000808835, -0.000759164, -0.000713011, -0.000670096, -0.00063016, -0.000592972]
# In[11]:
plt.figure(figsize=(12,8))
x =[]
x.append([ d[0] for d in data ])
x.append([ d[1] for d in data ])
x.append(np.real([ 1-sqrt(d[2]) for d in data ]))
idx = x[2].argsort()[::-1]
x[0] = [ x[0][i] for i in idx ]
x[1] = [ x[1][i] for i in idx ]
x[2] = [ x[2][i] for i in idx ]
plt.scatter(x[0], x[1], c=x[2], marker='o', s=30, vmax=1, vmin=0, linewidth=0, alpha=1)
plt.plot(mark_r,mark_B, color='r')
plt.gray()
plt.ylim(-0.5,0.5)
plt.xlim(0,1.1*r_stop)
plt.grid(b=True, which='major', color='0.5', linestyle='-')
plt.grid(b=True, which='minor', color='0.75', linestyle='--')
plt.title('Rydberg Blockade ${}+{}$, B=0 T'.format(sI,sI), fontsize=24)
plt.xlabel('$R (\mu m)$', fontsize=20)
plt.ylabel('U (GHz)', fontsize=20)
#plt.savefig('MolecularResonances_B=370uT_97D52m52_97D52m52.pdf')
plt.show()
# In[ ]:
| gpl-3.0 |
Winand/pandas | pandas/io/gbq.py | 13 | 4006 | """ Google BigQuery support """
def _try_import():
# since pandas is a dependency of pandas-gbq
# we need to import on first use
try:
import pandas_gbq
except ImportError:
# give a nice error message
raise ImportError("Load data from Google BigQuery\n"
"\n"
"the pandas-gbq package is not installed\n"
"see the docs: https://pandas-gbq.readthedocs.io\n"
"\n"
"you can install via pip or conda:\n"
"pip install pandas-gbq\n"
"conda install pandas-gbq -c conda-forge\n")
return pandas_gbq
def read_gbq(query, project_id=None, index_col=None, col_order=None,
reauth=False, verbose=True, private_key=None, dialect='legacy',
**kwargs):
r"""Load data from Google BigQuery.
The main method a user calls to execute a Query in Google BigQuery
and read results into a pandas DataFrame.
Google BigQuery API Client Library v2 for Python is used.
Documentation is available `here
<https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
query : str
SQL-Like Query to return data values
project_id : str
Google BigQuery Account project ID.
index_col : str (optional)
Name of result column to use for index in results DataFrame
col_order : list(str) (optional)
List of BigQuery column names in the desired order for results
DataFrame
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
verbose : boolean (default True)
Verbose output
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. jupyter iPython notebook on remote host)
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard. For more information
see `BigQuery SQL Reference
<https://cloud.google.com/bigquery/sql-reference/>`__
**kwargs : Arbitrary keyword arguments
configuration (dict): query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery SQL Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__
Returns
-------
df: DataFrame
DataFrame representing results of query
"""
pandas_gbq = _try_import()
return pandas_gbq.read_gbq(
query, project_id=project_id,
index_col=index_col, col_order=col_order,
reauth=reauth, verbose=verbose,
private_key=private_key,
dialect=dialect,
**kwargs)
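# A minimal usage sketch (illustrative only, not part of this module; the query
# and project id below are placeholder assumptions):
#
#   import pandas as pd
#   df = pd.read_gbq("SELECT name FROM [my_dataset.my_table] LIMIT 10",
#                    project_id="my-project-id", dialect='legacy')
#
# pd.read_gbq is the top-level pandas alias that resolves to this function.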
def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
pandas_gbq = _try_import()
pandas_gbq.to_gbq(dataframe, destination_table, project_id,
chunksize=chunksize,
verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
| bsd-3-clause |
jmschrei/scikit-learn | examples/manifold/plot_mds.py | 45 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
kdebrab/pandas | pandas/tests/tseries/test_holiday.py | 16 | 16104 | import pytest
from datetime import datetime
import pandas.util.testing as tm
from pandas import compat
from pandas import DatetimeIndex
from pandas.tseries.holiday import (USFederalHolidayCalendar, USMemorialDay,
USThanksgivingDay, nearest_workday,
next_monday_or_tuesday, next_monday,
previous_friday, sunday_to_monday, Holiday,
DateOffset, MO, SA, Timestamp,
AbstractHolidayCalendar, get_calendar,
HolidayCalendarFactory, next_workday,
previous_workday, before_nearest_workday,
EasterMonday, GoodFriday,
after_nearest_workday, weekend_to_monday,
USLaborDay, USColumbusDay,
USMartinLutherKingJr, USPresidentsDay)
from pytz import utc
class TestCalendar(object):
def setup_method(self, method):
self.holiday_list = [
datetime(2012, 1, 2),
datetime(2012, 1, 16),
datetime(2012, 2, 20),
datetime(2012, 5, 28),
datetime(2012, 7, 4),
datetime(2012, 9, 3),
datetime(2012, 10, 8),
datetime(2012, 11, 12),
datetime(2012, 11, 22),
datetime(2012, 12, 25)]
self.start_date = datetime(2012, 1, 1)
self.end_date = datetime(2012, 12, 31)
def test_calendar(self):
calendar = USFederalHolidayCalendar()
holidays = calendar.holidays(self.start_date, self.end_date)
holidays_1 = calendar.holidays(
self.start_date.strftime('%Y-%m-%d'),
self.end_date.strftime('%Y-%m-%d'))
holidays_2 = calendar.holidays(
Timestamp(self.start_date),
Timestamp(self.end_date))
assert list(holidays.to_pydatetime()) == self.holiday_list
assert list(holidays_1.to_pydatetime()) == self.holiday_list
assert list(holidays_2.to_pydatetime()) == self.holiday_list
def test_calendar_caching(self):
# Test for issue #9552
class TestCalendar(AbstractHolidayCalendar):
def __init__(self, name=None, rules=None):
super(TestCalendar, self).__init__(name=name, rules=rules)
jan1 = TestCalendar(rules=[Holiday('jan1', year=2015, month=1, day=1)])
jan2 = TestCalendar(rules=[Holiday('jan2', year=2015, month=1, day=2)])
tm.assert_index_equal(jan1.holidays(), DatetimeIndex(['01-Jan-2015']))
tm.assert_index_equal(jan2.holidays(), DatetimeIndex(['02-Jan-2015']))
def test_calendar_observance_dates(self):
# Test for issue 11477
USFedCal = get_calendar('USFederalHolidayCalendar')
holidays0 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
2015, 7, 3)) # <-- same start and end dates
holidays1 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
2015, 7, 6)) # <-- different start and end dates
holidays2 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
2015, 7, 3)) # <-- same start and end dates
tm.assert_index_equal(holidays0, holidays1)
tm.assert_index_equal(holidays0, holidays2)
def test_rule_from_name(self):
USFedCal = get_calendar('USFederalHolidayCalendar')
assert USFedCal.rule_from_name('Thanksgiving') == USThanksgivingDay
class TestHoliday(object):
def setup_method(self, method):
self.start_date = datetime(2011, 1, 1)
self.end_date = datetime(2020, 12, 31)
def check_results(self, holiday, start, end, expected):
assert list(holiday.dates(start, end)) == expected
# Verify that timezone info is preserved.
assert (list(holiday.dates(utc.localize(Timestamp(start)),
utc.localize(Timestamp(end)))) ==
[utc.localize(dt) for dt in expected])
def test_usmemorialday(self):
self.check_results(holiday=USMemorialDay,
start=self.start_date,
end=self.end_date,
expected=[
datetime(2011, 5, 30),
datetime(2012, 5, 28),
datetime(2013, 5, 27),
datetime(2014, 5, 26),
datetime(2015, 5, 25),
datetime(2016, 5, 30),
datetime(2017, 5, 29),
datetime(2018, 5, 28),
datetime(2019, 5, 27),
datetime(2020, 5, 25),
], )
def test_non_observed_holiday(self):
self.check_results(
Holiday('July 4th Eve', month=7, day=3),
start="2001-01-01",
end="2003-03-03",
expected=[
Timestamp('2001-07-03 00:00:00'),
Timestamp('2002-07-03 00:00:00')
]
)
self.check_results(
Holiday('July 4th Eve', month=7, day=3, days_of_week=(0, 1, 2, 3)),
start="2001-01-01",
end="2008-03-03",
expected=[
Timestamp('2001-07-03 00:00:00'),
Timestamp('2002-07-03 00:00:00'),
Timestamp('2003-07-03 00:00:00'),
Timestamp('2006-07-03 00:00:00'),
Timestamp('2007-07-03 00:00:00'),
]
)
def test_easter(self):
self.check_results(EasterMonday,
start=self.start_date,
end=self.end_date,
expected=[
Timestamp('2011-04-25 00:00:00'),
Timestamp('2012-04-09 00:00:00'),
Timestamp('2013-04-01 00:00:00'),
Timestamp('2014-04-21 00:00:00'),
Timestamp('2015-04-06 00:00:00'),
Timestamp('2016-03-28 00:00:00'),
Timestamp('2017-04-17 00:00:00'),
Timestamp('2018-04-02 00:00:00'),
Timestamp('2019-04-22 00:00:00'),
Timestamp('2020-04-13 00:00:00'),
], )
self.check_results(GoodFriday,
start=self.start_date,
end=self.end_date,
expected=[
Timestamp('2011-04-22 00:00:00'),
Timestamp('2012-04-06 00:00:00'),
Timestamp('2013-03-29 00:00:00'),
Timestamp('2014-04-18 00:00:00'),
Timestamp('2015-04-03 00:00:00'),
Timestamp('2016-03-25 00:00:00'),
Timestamp('2017-04-14 00:00:00'),
Timestamp('2018-03-30 00:00:00'),
Timestamp('2019-04-19 00:00:00'),
Timestamp('2020-04-10 00:00:00'),
], )
def test_usthanksgivingday(self):
self.check_results(USThanksgivingDay,
start=self.start_date,
end=self.end_date,
expected=[
datetime(2011, 11, 24),
datetime(2012, 11, 22),
datetime(2013, 11, 28),
datetime(2014, 11, 27),
datetime(2015, 11, 26),
datetime(2016, 11, 24),
datetime(2017, 11, 23),
datetime(2018, 11, 22),
datetime(2019, 11, 28),
datetime(2020, 11, 26),
], )
def test_holidays_within_dates(self):
# Fix holiday behavior found in #11477
# where holiday.dates returned dates outside start/end date
# or observed rules could not be applied as the holiday
# was not in the original date range (e.g., 7/4/2015 -> 7/3/2015)
start_date = datetime(2015, 7, 1)
end_date = datetime(2015, 7, 1)
calendar = get_calendar('USFederalHolidayCalendar')
new_years = calendar.rule_from_name('New Years Day')
july_4th = calendar.rule_from_name('July 4th')
veterans_day = calendar.rule_from_name('Veterans Day')
christmas = calendar.rule_from_name('Christmas')
# Holiday: (start/end date, holiday)
holidays = {USMemorialDay: ("2015-05-25", "2015-05-25"),
USLaborDay: ("2015-09-07", "2015-09-07"),
USColumbusDay: ("2015-10-12", "2015-10-12"),
USThanksgivingDay: ("2015-11-26", "2015-11-26"),
USMartinLutherKingJr: ("2015-01-19", "2015-01-19"),
USPresidentsDay: ("2015-02-16", "2015-02-16"),
GoodFriday: ("2015-04-03", "2015-04-03"),
EasterMonday: [("2015-04-06", "2015-04-06"),
("2015-04-05", [])],
new_years: [("2015-01-01", "2015-01-01"),
("2011-01-01", []),
("2010-12-31", "2010-12-31")],
july_4th: [("2015-07-03", "2015-07-03"),
("2015-07-04", [])],
veterans_day: [("2012-11-11", []),
("2012-11-12", "2012-11-12")],
christmas: [("2011-12-25", []),
("2011-12-26", "2011-12-26")]}
for rule, dates in compat.iteritems(holidays):
empty_dates = rule.dates(start_date, end_date)
assert empty_dates.tolist() == []
if isinstance(dates, tuple):
dates = [dates]
for start, expected in dates:
if len(expected):
expected = [Timestamp(expected)]
self.check_results(rule, start, start, expected)
def test_argument_types(self):
holidays = USThanksgivingDay.dates(self.start_date, self.end_date)
holidays_1 = USThanksgivingDay.dates(
self.start_date.strftime('%Y-%m-%d'),
self.end_date.strftime('%Y-%m-%d'))
holidays_2 = USThanksgivingDay.dates(
Timestamp(self.start_date),
Timestamp(self.end_date))
tm.assert_index_equal(holidays, holidays_1)
tm.assert_index_equal(holidays, holidays_2)
def test_special_holidays(self):
base_date = [datetime(2012, 5, 28)]
holiday_1 = Holiday('One-Time', year=2012, month=5, day=28)
holiday_2 = Holiday('Range', month=5, day=28,
start_date=datetime(2012, 1, 1),
end_date=datetime(2012, 12, 31),
offset=DateOffset(weekday=MO(1)))
assert base_date == holiday_1.dates(self.start_date, self.end_date)
assert base_date == holiday_2.dates(self.start_date, self.end_date)
def test_get_calendar(self):
class TestCalendar(AbstractHolidayCalendar):
rules = []
calendar = get_calendar('TestCalendar')
assert TestCalendar == calendar.__class__
def test_factory(self):
class_1 = HolidayCalendarFactory('MemorialDay',
AbstractHolidayCalendar,
USMemorialDay)
class_2 = HolidayCalendarFactory('Thansksgiving',
AbstractHolidayCalendar,
USThanksgivingDay)
class_3 = HolidayCalendarFactory('Combined', class_1, class_2)
assert len(class_1.rules) == 1
assert len(class_2.rules) == 1
assert len(class_3.rules) == 2
class TestObservanceRules(object):
def setup_method(self, method):
self.we = datetime(2014, 4, 9)
self.th = datetime(2014, 4, 10)
self.fr = datetime(2014, 4, 11)
self.sa = datetime(2014, 4, 12)
self.su = datetime(2014, 4, 13)
self.mo = datetime(2014, 4, 14)
self.tu = datetime(2014, 4, 15)
def test_next_monday(self):
assert next_monday(self.sa) == self.mo
assert next_monday(self.su) == self.mo
def test_next_monday_or_tuesday(self):
assert next_monday_or_tuesday(self.sa) == self.mo
assert next_monday_or_tuesday(self.su) == self.tu
assert next_monday_or_tuesday(self.mo) == self.tu
def test_previous_friday(self):
assert previous_friday(self.sa) == self.fr
assert previous_friday(self.su) == self.fr
def test_sunday_to_monday(self):
assert sunday_to_monday(self.su) == self.mo
def test_nearest_workday(self):
assert nearest_workday(self.sa) == self.fr
assert nearest_workday(self.su) == self.mo
assert nearest_workday(self.mo) == self.mo
def test_weekend_to_monday(self):
assert weekend_to_monday(self.sa) == self.mo
assert weekend_to_monday(self.su) == self.mo
assert weekend_to_monday(self.mo) == self.mo
def test_next_workday(self):
assert next_workday(self.sa) == self.mo
assert next_workday(self.su) == self.mo
assert next_workday(self.mo) == self.tu
def test_previous_workday(self):
assert previous_workday(self.sa) == self.fr
assert previous_workday(self.su) == self.fr
assert previous_workday(self.tu) == self.mo
def test_before_nearest_workday(self):
assert before_nearest_workday(self.sa) == self.th
assert before_nearest_workday(self.su) == self.fr
assert before_nearest_workday(self.tu) == self.mo
def test_after_nearest_workday(self):
assert after_nearest_workday(self.sa) == self.mo
assert after_nearest_workday(self.su) == self.tu
assert after_nearest_workday(self.fr) == self.mo
class TestFederalHolidayCalendar(object):
def test_no_mlk_before_1986(self):
# see gh-10278
class MLKCalendar(AbstractHolidayCalendar):
rules = [USMartinLutherKingJr]
holidays = MLKCalendar().holidays(start='1984',
end='1988').to_pydatetime().tolist()
# Testing to make sure holiday is not incorrectly observed before 1986
assert holidays == [datetime(1986, 1, 20, 0, 0),
datetime(1987, 1, 19, 0, 0)]
def test_memorial_day(self):
class MemorialDay(AbstractHolidayCalendar):
rules = [USMemorialDay]
holidays = MemorialDay().holidays(start='1971',
end='1980').to_pydatetime().tolist()
# Fixes 5/31 error and checked manually against Wikipedia
assert holidays == [datetime(1971, 5, 31, 0, 0),
datetime(1972, 5, 29, 0, 0),
datetime(1973, 5, 28, 0, 0),
datetime(1974, 5, 27, 0, 0),
datetime(1975, 5, 26, 0, 0),
datetime(1976, 5, 31, 0, 0),
datetime(1977, 5, 30, 0, 0),
datetime(1978, 5, 29, 0, 0),
datetime(1979, 5, 28, 0, 0)]
class TestHolidayConflictingArguments(object):
def test_both_offset_observance_raises(self):
# see gh-10217
with pytest.raises(NotImplementedError):
Holiday("Cyber Monday", month=11, day=1,
offset=[DateOffset(weekday=SA(4))],
observance=next_monday)
| bsd-3-clause |
simsynser/SimSyn | SimSyn.py | 1 | 38537 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SimSyn_v02.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
import psycopg2
from psycopg2.extensions import AsIs
import time
import pysd
import pandas as pd
from os import path
QtCore.QCoreApplication.addLibraryPath(path.join(path.dirname(QtCore.__file__), "plugins"))
QtGui.QImageReader.supportedImageFormats()
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
#Create main window, insert central widget into main window (child of main window)
#Add grid layout as child of central widget
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(600, 370)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("Local_v01-001.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.v = QtGui.QGridLayout(self.centralwidget)
self.v.setObjectName(_fromUtf8("v"))
#Put spacer between croup boxes
spacerItem1 = QtGui.QSpacerItem(40, 5, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.v.addItem(spacerItem1, 0, 0, 1, 1)
#Create 1st Group Box: Set Connections
self.GroupBox_Connect = QtGui.QGroupBox(self.centralwidget) #Assign 1st group box to central widget
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.GroupBox_Connect.setFont(font)
self.GroupBox_Connect.setObjectName(_fromUtf8("GroupBox_Connect"))
self.gridLayout = QtGui.QGridLayout(self.GroupBox_Connect) #Assign gridLayout to 1st group box
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.Db_Name = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.Db_Name.setFont(font)
self.Db_Name.setObjectName(_fromUtf8("Db_Name"))
self.gridLayout.addWidget(self.Db_Name, 0, 0, 1, 1)
self.Host_Name = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.Host_Name.setFont(font)
self.Host_Name.setObjectName(_fromUtf8("Host_Name"))
self.gridLayout.addWidget(self.Host_Name, 0, 1, 1, 1)
self.User_Name = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.User_Name.setFont(font)
self.User_Name.setObjectName(_fromUtf8("User_Name"))
self.gridLayout.addWidget(self.User_Name, 1, 0, 1, 1)
self.Password = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.Password.setFont(font)
self.Password.setObjectName(_fromUtf8("Password"))
self.gridLayout.addWidget(self.Password, 1, 1, 1, 1)
self.Port = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.Port.setFont(font)
self.Port.setObjectName(_fromUtf8("Port"))
self.gridLayout.addWidget(self.Port, 2, 0, 1, 1)
self.Table_Name = QtGui.QLineEdit(self.GroupBox_Connect)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.Table_Name.setFont(font)
self.Table_Name.setObjectName(_fromUtf8("Table_Name"))
self.gridLayout.addWidget(self.Table_Name, 2, 1, 1, 1)
self.Btn_Connect = QtGui.QPushButton(self.GroupBox_Connect)
self.Btn_Connect.setEnabled(True)
font.setBold(False)
font.setWeight(50)
self.Btn_Connect.setFont(font)
self.Btn_Connect.setFixedWidth(150)
self.Btn_Connect.setFixedHeight(20)
self.Btn_Connect.setObjectName(_fromUtf8("Btn_Connect"))
self.gridLayout.addWidget(self.Btn_Connect, 3, 0, 1, 1)
self.v.addWidget(self.GroupBox_Connect, 1, 0, 1, 1) #Add 1st group box to master grid layout
        self.Host_Name.raise_() #Raise widgets to the top of the parent widget's stack. After this call, the widget will be visually in front of any overlapping sibling widgets
self.Password.raise_()
self.User_Name.raise_()
self.Table_Name.raise_()
self.Port.raise_()
self.Db_Name.raise_()
self.Btn_Connect.raise_()
self.GroupBox_Connect.raise_()
        #Put spacer between group boxes
spacerItem2 = QtGui.QSpacerItem(40, 10, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.v.addItem(spacerItem2, 2, 0, 1, 1)
#Create 2nd Group Box: Build Vensim Model
self.GroupBox_build = QtGui.QGroupBox(self.centralwidget) #Assign 1st group box to central widget
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.GroupBox_build.setFont(font)
self.GroupBox_build.setObjectName(_fromUtf8("GroupBox_Connect"))
self.gridLayout2 = QtGui.QGridLayout(self.GroupBox_build) #Assign gridLayout to 1st group box
self.gridLayout2.setObjectName(_fromUtf8("gridLayout2"))
self.Model_Dir = QtGui.QLineEdit(self.GroupBox_build)
self.gridLayout2.addWidget(self.Model_Dir, 0, 0, 1, 2)
        self.sub_gridLayout = QtGui.QGridLayout(self.GroupBox_build) #Subgrid layout to package the 'Browse' and 'Load Model' buttons
self.sub_gridLayout.setObjectName(_fromUtf8("sub_gridLayout"))
self.gridLayout2.addLayout(self.sub_gridLayout, 0,2,1,1)
self.Btn_Browse_Ven = QtGui.QPushButton(self.GroupBox_build)
self.Btn_Browse_Ven.setObjectName(_fromUtf8("Btn_Browse_Ven"))
self.Btn_Browse_Ven.setFixedWidth(25)
self.Btn_Browse_Ven.setFixedHeight(20)
self.sub_gridLayout.addWidget(self.Btn_Browse_Ven, 0, 0, 1, 1)
self.Btn_Build = QtGui.QPushButton(self.GroupBox_build)
self.Btn_Build.setEnabled(True)
font.setBold(False)
font.setWeight(50)
self.Btn_Build.setFont(font)
self.Btn_Build.setFixedWidth(90)
self.Btn_Build.setFixedHeight(20)
self.Btn_Build.setObjectName(_fromUtf8("Btn_Build"))
self.sub_gridLayout.addWidget(self.Btn_Build, 0, 1, 1, 1)
self.v.addWidget(self.GroupBox_build, 3, 0, 1, 1) #Add 2nd group box to master grid layout
        #Put spacer between group boxes
spacerItem3 = QtGui.QSpacerItem(40, 10, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.v.addItem(spacerItem3, 4, 0, 1, 1)
#Create 3rd Group Box: Connection Settings
self.GroupBox_Settings = QtGui.QGroupBox(self.centralwidget) #Assign 3rd group box to central widget
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.GroupBox_Settings.setFont(font)
self.GroupBox_Settings.setObjectName(_fromUtf8("GroupBox_Settings"))
self.gridLayout_3 = QtGui.QGridLayout(self.GroupBox_Settings) #Assign gridLayout2 to 2nd group box
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.Link_Tab_Frame = QtGui.QFrame(self.GroupBox_Settings) #Create "Link_Tab_Frame" (Child) as subcontainer of "GroupBox_Settings" (Parent)
self.Link_Tab_Frame.setObjectName(_fromUtf8("Link_Tab_Frame"))
self.gridLayout_3.addWidget(self.Link_Tab_Frame, 2, 0, 3, 1) #Add subcontainer to grid Layout of the "GroupBox_Settings" parent object
self.gridLayout_frame = QtGui.QGridLayout(self.Link_Tab_Frame) #Create new grid Layout within "Link_Tab_Frame" subcontainer
self.gridLayout_frame.setObjectName(_fromUtf8("gridLayout_frame"))
self.gridLayout_frame.setContentsMargins(0, 0, 0, 0)
self.Link_Tab = QtGui.QTableWidget(self.Link_Tab_Frame)
self.Link_Tab.setObjectName(_fromUtf8("Link_Tab"))
self.Link_Tab.setColumnCount(4)
self.Link_Tab.horizontalHeader().setFixedHeight(20)
self.Link_Tab.setHorizontalHeaderLabels(["Model Variable", "Database Variable", "Use as", "Time (optional)"])
stylesheet = "QHeaderView::section{Background-color:rgb(90,90,90); border-radius:15px;border-right:1px solid #FFFFFF;}"
self.Link_Tab.horizontalHeader().setStyleSheet(stylesheet)
font = QtGui.QFont()
font.setBold(True)
        font.setPointSizeF(8.5) #setPointSize() expects an int; setPointSizeF() accepts fractional sizes
for c in range(self.Link_Tab.columnCount()):
self.Link_Tab.setColumnWidth(c, 130)
self.Link_Tab.horizontalHeaderItem(c).setFont(font)
self.Link_Tab.horizontalHeaderItem(c).setForeground(QtGui.QBrush(QtGui.QColor(255,255,255)))
self.gridLayout_frame.addWidget(self.Link_Tab, 0, 0, 1, 1)
self.Button_Frame = QtGui.QFrame(self.GroupBox_Settings)
self.Button_Frame.setObjectName(_fromUtf8("Button_Frame"))
self.gridLayout_3.addWidget(self.Button_Frame, 2, 1, 1, 2)
self.gridLayout_button = QtGui.QGridLayout(self.Button_Frame)
self.gridLayout_button.setObjectName(_fromUtf8("gridLayout_5"))
self.gridLayout_button.setContentsMargins(0, 0, 0, 0)
self.Btn_Plus = QtGui.QPushButton(self.GroupBox_Settings)
self.Btn_Plus.setEnabled(True)
self.Btn_Plus.setObjectName(_fromUtf8("Btn_Plus"))
self.Btn_Plus.setFixedHeight(20)
self.Btn_Plus.setFixedWidth(20)
self.gridLayout_button.addWidget(self.Btn_Plus, 0, 0, 1, 1)
self.Btn_Minus = QtGui.QPushButton(self.GroupBox_Settings)
self.Btn_Minus.setEnabled(True)
self.Btn_Minus.setObjectName(_fromUtf8("Btn_Minus"))
self.Btn_Minus.setFixedHeight(20)
self.Btn_Minus.setFixedWidth(20)
self.gridLayout_button.addWidget(self.Btn_Minus, 1, 0, 1, 1)
self.Btn_Reset = QtGui.QPushButton(self.GroupBox_Settings)
self.Btn_Reset.setEnabled(True)
font.setBold(False)
font.setWeight(50)
self.Btn_Reset.setFont(font)
self.Btn_Reset.setFixedHeight(20)
self.Btn_Reset.setFixedWidth(70)
self.Btn_Reset.setObjectName(_fromUtf8("Btn_Reset"))
self.gridLayout_button.addWidget(self.Btn_Reset, 2, 0, 1, 1)
self.Btn_Run = QtGui.QPushButton(self.GroupBox_Settings)
self.Btn_Run.setEnabled(True)
font.setBold(False)
font.setWeight(50)
self.Btn_Run.setFont(font)
self.Btn_Run.setFixedHeight(20)
self.Btn_Run.setFixedWidth(70)
self.Btn_Run.setObjectName(_fromUtf8("Btn_Run"))
self.gridLayout_button.addWidget(self.Btn_Run, 3, 0, 1, 1)
self.Progress_Run = QtGui.QProgressBar(self.GroupBox_Settings)
self.Progress_Run.setProperty("value", 0)
self.Progress_Run.setFixedHeight(15)
self.Progress_Run.setFixedWidth(70)
        self.Progress_Run.setVisible(False) #Progress bar is not visible before the Run button is pushed
self.Progress_Run.setObjectName(_fromUtf8("Progress_Run"))
self.gridLayout_button.addWidget(self.Progress_Run, 4, 0, 1, 1)
self.v.addWidget(self.GroupBox_Settings, 5, 0, 1, 1) #Add 2nd group box to master grid layout
self.GroupBox_Settings.raise_()
        #Put spacer between group boxes
spacerItem4 = QtGui.QSpacerItem(40, 5, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.v.addItem(spacerItem4, 6, 0, 1, 1)
        #Finalize the main window: attach the central widget and connect signals/slots
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "SimSyn", None))
self.GroupBox_Connect.setTitle(_translate("MainWindow", "Database Connection", None))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(60)
self.GroupBox_Connect.setFont(font)
self.GroupBox_build.setTitle(_translate("MainWindow", "Model Connection", None))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(60)
self.GroupBox_build.setFont(font)
self.GroupBox_Settings.setTitle(_translate("MainWindow", "Data Link(s)", None))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(60)
self.GroupBox_Settings.setFont(font)
self.Password.placeholderText()
self.Password.setPlaceholderText(_translate("MainWindow", "Password", None))
self.Port.placeholderText()
self.Port.setPlaceholderText(_translate("MainWindow", "Port", None))
self.User_Name.placeholderText()
self.User_Name.setPlaceholderText(_translate("MainWindow", "User Name", None))
self.Host_Name.placeholderText()
self.Host_Name.setPlaceholderText(_translate("MainWindow", "Host", None))
self.Table_Name.placeholderText()
self.Table_Name.setPlaceholderText(_translate("MainWindow", "Table Name [comma delimit multiple tables]", None))
self.Db_Name.placeholderText()
self.Db_Name.setPlaceholderText(_translate("MainWindow", "Database Name", None))
self.Btn_Connect.setText(_translate("MainWindow", "Connect Database", None))
self.Btn_Build.setText(_translate("MainWindow", "Load Model", None))
self.Btn_Run.setText(_translate("MainWindow", "Run", None))
self.Btn_Reset.setText(_translate("MainWindow", "Reset...", None))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("Icons/BrowseIcon_v01.png"))
self.Btn_Browse_Ven.setIcon(icon)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("Icons/PlusIcon3.png")) #Cannot add vector svg!!!
self.Btn_Plus.setIcon(icon2)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("Icons/MinusIcon3.png")) #Cannot add vector svg!!!
self.Btn_Minus.setIcon(icon3)
#Execute functions, if buttons are clicked
self.Btn_Connect.clicked.connect(self.connectDb)
self.Btn_Browse_Ven.clicked.connect(self.browse)
self.Btn_Build.clicked.connect(self.loadVen)
self.Btn_Plus.clicked.connect(self.addrow)
self.Btn_Minus.clicked.connect(self.removerow)
self.Btn_Reset.clicked.connect(self.reset)
self.Btn_Run.clicked.connect(self.run)
def connectDb(self):
#Get DB specification
name_db = self.Db_Name.text()
user_nm = self.User_Name.text()
host = self.Host_Name.text()
passwrd = self.Password.text()
pt = self.Port.text()
#Format table names as 'Table 1', 'Table 2', 'Table 3' ...
self.name_tb = ""
for t in self.Table_Name.text().split(','):
if self.name_tb != "":
self.name_tb += ","
self.name_tb += "'" + t + "'"
#Delete empty space, if required
try:
            self.name_tb = self.name_tb.replace(" ", "")
except:
pass
#Access Database, get column names of tables and save them as a self attribute of the class
try:
self.con = psycopg2.connect(dbname = name_db, host = host, port = int(pt), user = user_nm, password = passwrd)
curs = self.con.cursor()
curs.execute("SELECT column_name, table_name FROM information_schema.columns WHERE table_name IN (%s);" % AsIs(str(self.name_tb)))
self.tb_columns = curs.fetchall()
try:
self.Progress_Label_con.clear() #Reset label
except:
pass
if len(self.tb_columns) == 0:
self.error_message = QtGui.QMessageBox(self.centralwidget)
self.error_message.setIcon(QtGui.QMessageBox.Critical)
self.error_message.setWindowTitle("Connection Info")
self.error_message.setText("Unable to connect to database!")
self.error_message.setStandardButtons(QtGui.QMessageBox.Ok)
self.error_message.exec_()
else:
font = QtGui.QFont()
font.setBold(False)
font.setWeight(15)
self.Progress_Label_con = QtGui.QLabel()
self.Progress_Label_con.setFont(font)
self.gridLayout.addWidget(self.Progress_Label_con, 3, 1, 1, 1)
self.Progress_Label_con.setText("Connected to " + self.name_tb)
except:
self.error_message = QtGui.QMessageBox(self.centralwidget)
self.error_message.setIcon(QtGui.QMessageBox.Critical)
self.error_message.setWindowTitle("Connection Info")
self.error_message.setText("Unable to connect to database!")
self.error_message.setStandardButtons(QtGui.QMessageBox.Ok)
self.error_message.exec_()
def browse(self):
self.Model_Dir.setText(QtGui.QFileDialog.getOpenFileName())
def loadVen(self):
try:
ven_path = str(self.Model_Dir.text())
self.model = pysd.read_vensim(ven_path)
self.message = QtGui.QMessageBox(self.centralwidget)
self.message.setIcon(QtGui.QMessageBox.Information)
self.message.setWindowTitle("Load VENSIM")
            self.message.setText("Successfully connected to '" + str(self.model).split("/")[-1] + "'")
self.message.setStandardButtons(QtGui.QMessageBox.Ok)
self.message.exec_()
except:
self.error_message1 = QtGui.QMessageBox(self.centralwidget) #self.centralwidget is used as parent to so that messagebox is centered above parent
self.error_message1.setWindowTitle("Load VENSIM")
self.error_message1.setText("Couldn't connect to VENSIM model.")
self.error_message1.setIcon(QtGui.QMessageBox.Critical)
self.error_message1.setStandardButtons(QtGui.QMessageBox.Ok)
self.error_message1.exec_()
def addrow(self):
labels = []
elem = []
try:
#Get name (labels) of PostgreSQL table columns
for c in self.tb_columns:
labels.append(c[0])
#Get name (labels) VENSIM model elements
for i in dir(self.model.components):
if i[0] != "_" and i not in ['doc', 'time', 'time_step', 't', 'state_vector', 'state', 'final_time', 'functions', 'reset_state', 'saveper', 'd_dt']: #Not very clean solution for filtering model elements!
elem.append(i)
except:
pass
if (len(labels) == 0) or (len(elem) == 0):
self.error_message2 = QtGui.QMessageBox(self.centralwidget)
self.error_message2.setIcon(QtGui.QMessageBox.Critical)
self.error_message2.setWindowTitle("Input Info")
self.error_message2.setText("No database dataset or no VENSIM model loaded!")
self.error_message2.setStandardButtons(QtGui.QMessageBox.Ok)
self.error_message2.exec_()
else:
#Add Combobox as item to table
rowPosition = self.Link_Tab.rowCount()
self.Link_Tab.insertRow(rowPosition)
self.Link_Tab.setRowHeight(rowPosition, 20)
self.ven_var = QtGui.QComboBox()
self.ven_var.addItems(labels)
self.post_var = QtGui.QComboBox()
self.post_var.addItems(elem)
self.use = QtGui.QComboBox()
self.use.addItems(["Time Series", "Subscript"])
self.time_edit = QtGui.QLineEdit()
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.time_edit.setFont(font)
self.time_edit.setObjectName(_fromUtf8("time_edit"))
#self.time_edit.setText("<Default is constant>")
self.time_edit.setPlaceholderText("<Default is constant>")
self.Link_Tab.setCellWidget(rowPosition,0,self.post_var)
self.Link_Tab.setCellWidget(rowPosition,1,self.ven_var)
self.Link_Tab.setCellWidget(rowPosition,2,self.use)
self.Link_Tab.setCellWidget(rowPosition,3,self.time_edit)
def removerow(self):
rowPosition = self.Link_Tab.rowCount()
self.Link_Tab.removeRow((rowPosition - 1))
def reset(self):
self.message1 = QtGui.QMessageBox(self.centralwidget)
self.message1.setIcon(QtGui.QMessageBox.Information)
self.message1.setWindowTitle("Reset Info")
self.message1.setText("Outputs ('sim_out' table) of a previous run will be deleted!")
self.message1.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
retval = self.message1.exec_()
        if retval == QtGui.QMessageBox.Ok: #Ok == 1024
curs = self.con.cursor()
try:
curs.execute(""" DROP TABLE sim_out; """)
self.con.commit()
del curs
self.Progress_Run.setVisible(False)
except:
self.con.commit()
del curs
#So far users cannot decide what to consider in the output!!
#Only VENSIM Stocks are written to separate PostGIS Table Columns
def run(self):
#Catch runtime error, if no links are set in table
#Consider: Links can only be set if DB is connected and Simulation Input is selected
if len(self.Link_Tab.findChildren(QtGui.QComboBox)) == 0:
self.error_message3 = QtGui.QMessageBox(self.centralwidget)
self.error_message3.setIcon(QtGui.QMessageBox.Critical)
self.error_message3.setWindowTitle("Compression Info")
self.error_message3.setText("No application links selected")
self.error_message3.setStandardButtons(QtGui.QMessageBox.Ok)
self.error_message3.exec_()
#Catch exception: sim_out already exist --> Old compression schema (merge_id, sim_out) blocks compression
bool_tab_exist = 0
try:
curs = self.con.cursor()
curs.execute(""" SELECT * FROM sim_out WHERE gid = 1; """)
bool_tab_exist = 1
del curs
except:
self.con.commit()
del curs
if bool_tab_exist == 1:
self.error_message5 = QtGui.QMessageBox(self.centralwidget)
self.error_message5.setIcon(QtGui.QMessageBox.Critical)
self.error_message5.setWindowTitle("Compression Info")
self.error_message5.setText("Output already exists. Make a reset before execution.")
self.error_message5.setStandardButtons(QtGui.QMessageBox.Ok)
self.error_message5.exec_()
#Count subscripting links
subscripting_count = 0
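        #Note (added for clarity): addrow() inserts three comboboxes per table row
        #(VENSIM element, PostGIS column, usage type), so every third child combobox
        #((idx + 1) % 3 == 0) is assumed to be the row's "use" selector. This relies
        #on findChildren() returning widgets in insertion order.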
for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
if (idx + 1) % 3 == 0:
if itm.currentText() == "Subscript":
subscripting_count += 1
##make sure that subscripting table has 'geometry column'
#Get name of subscripting table
subscripting_table = []
for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
if (idx + 1) % 3 == 0:
if itm.currentText() == "Subscript":
subscripting_table.append(self.tb_columns[self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentIndex()][1])
subscripting_table = list(set(subscripting_table))
print subscripting_table
bool_tab_geom = 0
try:
curs = self.con.cursor()
curs.execute("SELECT geom FROM %s WHERE gid = 1;" % AsIs(str(subscripting_table[0])))
bool_tab_geom = 1
del curs
except:
self.con.commit()
del curs
#Create sim_out table with geometry column (input subscripting table with geometry)
if (subscripting_count > 0) and (bool_tab_geom == 1) and (bool_tab_exist == 0):
try:
curs = self.con.cursor()
curs.execute("CREATE TABLE sim_out AS SELECT geom, gid FROM %s; ALTER TABLE sim_out ADD PRIMARY KEY (gid);" % AsIs(subscripting_table[0])) #copy geom as spatial and gid as non-spatial foreign key
                self.con.commit() #gid is simultaneously used as foreign and primary key in 'sim_out' as table relation is 1 to 1
del curs
except psycopg2.OperationalError:
self.error_message7 = QtGui.QMessageBox(self.centralwidget)
                self.error_message7.setIcon(QtGui.QMessageBox.Critical)
self.error_message7.setWindowTitle("Operational Error 1")
self.error_message7.setText("Size of array exceeded (see Documentation)")
self.error_message7.setStandardButtons(QtGui.QMessageBox.Ok)
self.error_message7.exec_()
curs = self.con.cursor()
self.con.commit()
del curs
#Create sim_out table without geometry column (input subscripting table is non-spatial)
if (subscripting_count > 0) and (bool_tab_geom == 0) and (bool_tab_exist == 0):
try:
curs = self.con.cursor()
curs.execute("CREATE TABLE sim_out AS SELECT gid FROM %s; ALTER TABLE sim_out ADD PRIMARY KEY (gid);" % AsIs(subscripting_table[0])) #copy gid as non-spatial foreign key
                self.con.commit() #gid is simultaneously used as foreign and primary key in 'sim_out' as table relation is 1 to 1
del curs
except psycopg2.OperationalError:
self.error_message7 = QtGui.QMessageBox(self.centralwidget)
                self.error_message7.setIcon(QtGui.QMessageBox.Critical)
self.error_message7.setWindowTitle("Operational Error 1")
self.error_message7.setText("Size of array exceeded (see Documentation)")
self.error_message7.setStandardButtons(QtGui.QMessageBox.Ok)
self.error_message7.exec_()
#Assign time series to model
for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
if (idx + 1) % 3 == 0:
if itm.currentText() == "Time Series":
#Get name of time series table
time_series_tb = []
time_series_tb.append(self.tb_columns[self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentIndex()][1])
time_series_tb = list(set(time_series_tb))
#print time_series_tb
#Fetch time series data from database
curs = self.con.cursor()
field = self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentText()
curs.execute("SELECT %s FROM %s;" % (field, time_series_tb[0]))
time_series = curs.fetchall()
#Assign data to model
pandas_t = []
pandas_d = []
for t,d in enumerate(time_series):
pandas_t.append(t)
pandas_d.append(d[0])
time_series_pd = pd.Series(index=pandas_t, data=pandas_d)
ven_var_ts = self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 2].currentText()
self.model.set_components(params={str(ven_var_ts):time_series_pd})
#Only run subscripting procedure, if at least one subscripting link is selected
if (subscripting_count != 0) and (bool_tab_exist == 0):
#Get Table Links as List [[VENSIM Var. Name 1, PostGIS Var. Name 1, Time1], [VENSIM Var. Name 2, PostGIS Var. Name 2, Time2], [...], ...]
table_links = []
for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
if (idx + 1) % 3 == 0:
if itm.currentText() == "Subscript":
if str(self.Link_Tab.findChildren(QtGui.QLineEdit)[((idx + 1) / 3) - 1].text()) != "":
table_links.append([str(self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 2].currentText()),
str(self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentText()), str(self.Link_Tab.findChildren(QtGui.QLineEdit)[((idx + 1) / 3) - 1].text())])
else:
table_links.append([str(self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 2].currentText()),
str(self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentText()), str(self.Link_Tab.findChildren(QtGui.QLineEdit)[((idx + 1) / 3) - 1].placeholderText())])
#Check for duplicates in entries (Vensim Variable is assigned twice,
#e.g. Table Row 1: 'albedo' | 'albedo0ad' | 0; Table Row 2: 'albedo' | 'albedo1000ad' | 0 --> albedo at time 0 can only have one value!)
table_links_reduced = [x[::2] for x in table_links]
dupl_count = 0
for idx, itm in enumerate(table_links_reduced):
for l in table_links_reduced:
if cmp(itm, l) == 0:
dupl_count+=1
if dupl_count > len(table_links_reduced):
self.error_message9 = QtGui.QMessageBox(self.centralwidget)
self.error_message9.setIcon(QtGui.QMessageBox.Critical)
self.error_message9.setWindowTitle("Input Error")
self.error_message9.setText("Time constant or time dependent value assignment is redundant! Change time settings. Then reset, compress and rerun simulation.")
self.error_message9.setStandardButtons(QtGui.QMessageBox.Ok)
self.error_message9.exec_()
###Input error 'time origin > 0' is not implemented yet!!!
else:
#Make progress bar visible once run button is pushed
self.Progress_Run.setVisible(True)
#Add column, one per stock in the SD model, type array; also count rows in the table
curs = self.con.cursor()
for i in self.model.components._stocknames:
curs.execute("ALTER TABLE sim_out ADD COLUMN %s real[]" % i) #real is 4 bytes per array element, double precision would be 8 bytes
self.con.commit()
#Count rows of sim_out
row_count = 0.0
curs.execute(""" SELECT count(*) FROM sim_out; """)
row_count = curs.fetchall()[0][0]
#Get name of subscripting table
subscripting_table = []
for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
if (idx + 1) % 3 == 0:
if itm.currentText() == "Subscript":
subscripting_table.append(self.tb_columns[self.Link_Tab.findChildren(QtGui.QComboBox)[idx - 1].currentIndex()][1])
subscripting_table = list(set(subscripting_table))
#Fetch run, save
g_count = 0.0
for g in xrange(1,(row_count + 1)):
start = time.time()
pvars = [x[1] for x in table_links]
post_vars = str(pvars).replace('[', '').replace(']', '').replace("'", "")
SQL = ''' SELECT %s FROM %s WHERE gid = %s; ''' % (AsIs(post_vars), AsIs(subscripting_table[0]), g)
curs.execute(SQL)
self.con.commit()
post_data = curs.fetchall()
#curs.execute("""DROP TABLE tempo;""")
#Append Fetched PostGIS data to the tables link list [[VENSIM Var. Name 1, PostGIS Var. Name 1, Time1, PostGIS Value 1], [VENSIM Var. Name 2, PostGIS Var. Name 2, Time2, PostGIS Value 2], [...], ...]
for idx, itm in enumerate(table_links):
itm.append(post_data[0][idx])
#Set time constant parameters in Vensim model
for x in table_links:
if x[2] == "<Default is constant>":
self.model.set_components(params={x[0]:x[3]})
#end = time.time()
#print "Fetch " + str((end - start))
#start = time.time()
#Set time dependent parameters in Vensim model. Values are linearly interpolated between time dependent inputs
for v in list(set([x[0] for x in table_links if x[2].isalnum()])):
pandas_time = []
pandas_data = []
for x in table_links:
if (x[2].isalnum()) and (x[0] == v):
pandas_time.append(x[2])
pandas_data.append(x[3])
pandas_time_float = [float(x) for x in pandas_time]
pandas_data_float = [float(x) for x in pandas_data]
pandas_data_sorted = [x for (y,x) in sorted(zip(pandas_time_float,pandas_data_float))] #sort pandas data by time
pandas_time_sorted = sorted(pandas_time_float)
look = pd.Series(index=pandas_time_sorted, data=pandas_data_sorted)
self.model.set_components(params={v:look})
#Run simulation for one collection element
st = self.model.run()
#end = time.time()
#print "Run " + str((end - start))
#Clear data value for next spatial object simulation run
for e in table_links:
del e[-1]
#start = time.time()
#save to database
for col in self.model.components._stocknames:
pd_lst = st[col].tolist()
curs.execute("UPDATE sim_out SET (%s) = (%s) WHERE gid = %s", (AsIs(col), pd_lst, g))
self.con.commit()
#Update progress
g_count += 1
complete = g_count / row_count * 100
self.Progress_Run.setValue(complete)
QtGui.QApplication.processEvents() #refresh application
end = time.time()
print "1 FRS " + str((end - start))
#Check for number of time series links
series_count = 0
for idx, itm in enumerate(self.Link_Tab.findChildren(QtGui.QComboBox)):
if (idx + 1) % 3 == 0:
if itm.currentText() == "Time Series":
series_count += 1
#If link schema includes time series links only, then run and save to sim_out
if (subscripting_count == 0) and (series_count > 0) and (bool_tab_exist == 0):
#Make progress bar visible once run button is pushed
self.Progress_Run.setVisible(True)
#create sim_out table
curs = self.con.cursor()
curs.execute(""" CREATE TABLE sim_out (gid BIGSERIAL PRIMARY KEY); """)
self.con.commit()
for i in self.model.components._stocknames:
curs.execute("ALTER TABLE sim_out ADD COLUMN %s numeric" % i) #real is 4 bytes per array element, double precision would be 8 bytes
self.con.commit()
del curs
#Run
st = self.model.run()
#Save
curs = self.con.cursor()
stocks_str = str(self.model.components._stocknames).replace("[", "").replace("]", "").replace("'", "")
for idx, row in st.iterrows():
groups = ()
for v in row:
groups += (v,)
curs.execute("INSERT INTO sim_out (%s) VALUES %s" % (stocks_str, groups))
self.con.commit()
del curs
self.Progress_Run.setValue(100)
try:
del curs
except:
pass
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
app.setStyle(QtGui.QStyleFactory.create("plastique"))
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow() #Create instance of class Ui_MainWindow() defined above
ui.setupUi(MainWindow) #The class Ui_MainWindow inherits from class "QtGui.QMainWindow()" using method setupUi (see definition of method setupUi above)
MainWindow.show()
sys.exit(app.exec_())
| agpl-3.0 |
nilmtk/nilmtk | nilmtk/stats/dropoutrateresults.py | 8 | 1783 | import matplotlib.pyplot as plt
from ..results import Results
from ..consts import SECS_PER_DAY
class DropoutRateResults(Results):
"""
Attributes
----------
_data : pd.DataFrame
index is start date for the whole chunk
`end` is end date for the whole chunk
`dropout_rate` is float [0,1]
`n_samples` is int, used for calculating weighted mean
"""
name = "dropout_rate"
def combined(self):
"""Calculates weighted average.
Returns
-------
dropout_rate : float, [0,1]
"""
tot_samples = self._data['n_samples'].sum()
proportion = self._data['n_samples'] / tot_samples
dropout_rate = (self._data['dropout_rate'] * proportion).sum()
return dropout_rate
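        # Worked example (illustrative numbers only): two chunks with dropout
        # rates 0.1 and 0.3 and n_samples 900 and 100 combine to
        # (0.1 * 900 + 0.3 * 100) / 1000 = 0.12.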
def unify(self, other):
super(DropoutRateResults, self).unify(other)
for i, row in self._data.iterrows():
# store mean of dropout rate
self._data['dropout_rate'].loc[i] += other._data['dropout_rate'].loc[i]
self._data['dropout_rate'].loc[i] /= 2
self._data['n_samples'].loc[i] += other._data['n_samples'].loc[i]
def to_dict(self):
return {'statistics': {'dropout_rate': self.combined()}}
def plot(self, ax=None):
if ax is None:
ax = plt.gca()
ax.xaxis.axis_date()
for index, row in self._data.iterrows():
length = (row['end'] - index).total_seconds() / SECS_PER_DAY
            rect = plt.Rectangle((index, 0),  # bottom left corner
                                 length,  # width
                                 row['dropout_rate'],  # height
                                 color='b')
ax.add_patch(rect)
ax.autoscale_view()
| apache-2.0 |
jaidevd/scikit-learn | examples/svm/plot_custom_kernel.py | 43 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
                 (2  0)
    k(X, Y) = X  (    ) Y.T
                 (0  1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
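# Sanity check (illustrative values, not part of the original example): for
# X = [[1, 2]] and Y = [[3, 4]], my_kernel returns [[2*1*3 + 1*2*4]] = [[14]].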
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
mxjl620/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
luo66/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
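# Note (added for clarity): each direction vector shifts every 8x8 image by one
# pixel along a single axis, zero-padding at the border ('constant' mode), so
# the augmented set is the original plus four shifted copies -- 5x the size.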
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
RomainBrault/scikit-learn | examples/plot_isotonic_regression.py | 55 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
vshtanko/scikit-learn | sklearn/utils/validation.py | 66 | 23629 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
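# Usage sketch (illustrative, not part of the library source):
#
#   >>> import numpy as np
#   >>> check_array([[1, 2], [3, 4]]).shape    # numeric dtype preserved
#   (2, 2)
#   >>> check_array([1, 2, 3]).shape           # 1d input promoted via np.atleast_2d
#   (1, 3)
#   >>> check_array(np.array([[np.nan, 1.]]))  # raises ValueError (non-finite values)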
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
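# Usage sketch (illustrative): check_random_state(None) returns the global
# np.random singleton, check_random_state(0) returns a fresh RandomState seeded
# with 0, and an existing RandomState instance is passed through unchanged.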
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
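# Worked example (illustrative): for array = np.array([[0., 2.], [0., 0.]]) the
# default call warns and returns the symmetrized average [[0., 1.], [1., 0.]];
# with raise_exception=True it raises ValueError instead.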
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
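# Usage sketch (illustrative, not part of the library source):
#
#   from sklearn.svm import SVC
#   svc = SVC()
#   check_is_fitted(svc, "support_")           # raises NotFittedError
#   svc.fit([[0, 0], [1, 1]], [0, 1])
#   check_is_fitted(svc, "support_")           # passes silently once fitted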
| bsd-3-clause |
ai-se/Tree-Learner | methods1.py | 1 | 2448 | #! /Users/rkrsn/anaconda/bin/python
from os import environ, getcwd, walk
import sys
# Update PYTHONPATH
HOME = environ['HOME']
axe = HOME + '/git/axe/axe/' # AXE
pystat = HOME + '/git/pystats/' # PySTAT
cwd = getcwd() # Current Directory
sys.path.extend([axe, pystat, cwd])
from dtree import *
from table import *
from _imports.where2 import *
import makeAmodel
import matplotlib.mlab as mlab
# import matplotlib.pyplot as plt
import smote
def explore(dir):
datasets = []
for (dirpath, dirnames, filenames) in walk(dir):
datasets.append(dirpath)
training = []
testing = []
for k in datasets[1:]:
train = [[dirPath, fname] for dirPath, _, fname in walk(k)]
test = [train[0][0] + '/' + train[0][1].pop(-1)]
training.append(
[train[0][0] + '/' + p for p in train[0][1] if not p == '.DS_Store'])
testing.append(test)
return training, testing
def newTable(tbl, headerLabel, Rows):
tbl2 = clone(tbl)
newHead = Sym()
newHead.col = len(tbl.headers)
newHead.name = headerLabel
tbl2.headers = tbl.headers + [newHead]
return clone(tbl2, rows=Rows)
def createTbl(
data,
settings=None,
_smote=False,
isBin=False,
bugThres=1,
duplicate=False):
"""
kwargs:
_smote = True/False : SMOTE input data (or not)
_isBin = True/False : Reduce bugs to defects/no defects
_bugThres = int : Threshold for marking stuff as defective,
default = 1. Not defective => Bugs < 1
"""
makeaModel = makeAmodel.makeAModel()
_r = []
for t in data:
m = makeaModel.csv2py(t, _smote=_smote, duplicate=duplicate)
_r += m._rows
m._rows = _r
prepare(m, settings=None) # Initialize all parameters for where2 to run
tree = where2(m, m._rows) # Decision tree using where2
tbl = table(t)
headerLabel = '=klass'
Rows = []
for k, _ in leaves(tree): # for k, _ in leaves(tree):
for j in k.val:
tmp = j.cells
if isBin:
tmp[-1] = 0 if tmp[-1] < bugThres else 1
tmp.append('_' + str(id(k) % 1000))
j.__dict__.update({'cells': tmp})
Rows.append(j.cells)
return newTable(tbl, headerLabel, Rows)
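# Pipeline sketch (summarizing the steps above): createTbl reads the CSVs
# (optionally SMOTE-ing them), clusters the rows with WHERE2, tags every row
# with its cluster id in an extra '=klass' column, and returns the result as a
# new table.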
def test_createTbl():
dir = '../Data/camel/camel-1.6.csv'
newTbl = createTbl([dir], _smote=False)
newTblSMOTE = createTbl([dir], _smote=True)
print(len(newTbl._rows), len(newTblSMOTE._rows))
def drop(test, tree):
loc = apex(test, tree)
return loc
if __name__ == '__main__':
test_createTbl()
| unlicense |
saketkc/hatex | 2019_Spring/CSCI-572/HW04/CSCI572_HW4/create_edgerank.py | 1 | 2917 | import pandas as pd
from bs4 import BeautifulSoup
import glob
import ntpath
import networkx as nx
def path_leaf(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def get_outgoing_links(html_file):
"""Get list of outgoing links for the input html file.
Parameters
----------
html_file: str
Path to html file
Returns
-------
list_of_urls: list
List of outgoing urls
"""
soup = BeautifulSoup(open(html_file).read().encode("utf-8"))
links = []
    for link in soup.findAll("a", href=True):
        # Skip internal links
try:
href = link.get("href")
except IndexError:
continue
if href == "#":
continue
try:
text = link.contents[0]
except IndexError:
# text = ''
pass
links.append(link.get("href"))
return links
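# For example (illustrative): a page whose body contains
# <a href="https://example.com/a">A</a> and <a href="#">top</a> yields
# ["https://example.com/a"]; bare "#" anchors are skipped.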
def get_filenames_for_URLs(mapping_file_df, list_of_links):
"""Get list of html filenames for a list of links
Parameters
----------
mapping_file_df: pd.DataFrame
Dataframe with mapping.csv loaded
list_of_links: list
List of URLs
Returns
-------
list_of_filenames: list
List of filenames
"""
return mapping_file_df[mapping_file_df.URL.isin(list_of_links)].filename.tolist()
def main():
crawl_data_dir = (
"/media/rna/yahoo_crawl_data/Yahoo-20190406T235503Z-001/Yahoo/yahoo/"
)
csv_file = "/media/rna/yahoo_crawl_data/Yahoo-20190406T235503Z-001/Yahoo/URLtoHTML_yahoo_news.csv"
mapping_file_df = (
pd.read_csv(csv_file).sort_values(by=["filename", "URL"]).reset_index(drop=True)
)
list_of_html_files = glob.glob("{}/*.html".format(crawl_data_dir))
with open("edgeList.txt", "w") as fh:
for filepath in list_of_html_files:
filename = path_leaf(filepath)
links = get_outgoing_links(filepath)
filenames_for_url = get_filenames_for_URLs(mapping_file_df, links)
# connection_matrix.loc[filename, filenames_for_url]+=1
# connection_matrix.loc[filename, filenames_for_url] =1
# with open()
fh.write("{} {}\n".format(filename, " ".join(filenames_for_url)))
G = nx.read_adjlist("edgeList.txt", create_using=nx.DiGraph())
pagerank = nx.pagerank(
G,
alpha=0.85,
personalization=None,
max_iter=100,
tol=1e-06,
nstart=None,
weight="weight",
dangling=None,
)
with open("external_PageRankFile.txt", "w") as fh:
for key, value in pagerank.items():
fh.write("{}/{}={}\n".format(crawl_data_dir, key, value))
if __name__ == "__main__":
main()
| mit |
glemaitre/UnbalancedDataset | examples/under-sampling/plot_random_under_sampler.py | 2 | 2013 | """
=====================
Random under-sampling
=====================
An illustration of the random under-sampling method.
"""
# Authors: Christos Aridas
# Guillaume Lemaitre <[email protected]>
# License: MIT
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.under_sampling import RandomUnderSampler
print(__doc__)
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=200, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply the random under-sampling
rus = RandomUnderSampler(return_indices=True)
X_resampled, y_resampled, idx_resampled = rus.fit_sample(X, y)
X_res_vis = pca.transform(X_resampled)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
idx_samples_removed = np.setdiff1d(np.arange(X_vis.shape[0]),
idx_resampled)
idx_class_0 = y_resampled == 0
plt.scatter(X_res_vis[idx_class_0, 0], X_res_vis[idx_class_0, 1],
alpha=.8, label='Class #0')
plt.scatter(X_res_vis[~idx_class_0, 0], X_res_vis[~idx_class_0, 1],
alpha=.8, label='Class #1')
plt.scatter(X_vis[idx_samples_removed, 0], X_vis[idx_samples_removed, 1],
alpha=.8, label='Removed samples')
# make nice plotting
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.set_xlim([-6, 6])
ax.set_ylim([-6, 6])
plt.title('Under-sampling using random under-sampling')
plt.legend()
plt.tight_layout()
plt.show()
| mit |
jcatw/scnn | scnn/scnn.py | 1 | 11080 | __author__ = 'jatwood'
import lasagne
import lasagne.layers as layers
import theano
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt
import util
# This class is not user facing; it contains the Lasagne internals for the SCNN model.
class SearchConvolution(layers.MergeLayer):
"""
A search-convolutional Lasagne layer.
"""
def __init__(self, incomings, n_hops, n_features,
W=lasagne.init.Normal(0.01),
nonlinearity=lasagne.nonlinearities.tanh,
**kwargs):
super(SearchConvolution, self).__init__(incomings, **kwargs)
self.W = self.add_param(W, (n_hops,n_features), name='W')
self.n_hops = n_hops
self.n_features = n_features
self.nonlinearity = nonlinearity
def get_output_for(self, inputs, **kwargs):
"""
Compute search convolution of inputs.
:param inputs: [Apow, X]
        :return: Search convolution of inputs with shape (self.n_hops, self.n_features)
"""
Apow = inputs[0]
X = inputs[1]
def compute_output(i, w, a, x, h):
"""
:param i: index
:param w: weight vector (n_features,)
:param x: feature vector (n_nodes, n_features)
:param h: n_hops
:param a: adjacency matrix (n_nodes, n_nodes)
:return: output[i]
"""
return (T.dot(a, x).transpose()) * T.addbroadcast(T.reshape(w, (w.shape[0],1)),1)
seq_values = np.arange(self.n_hops)
seq = theano.shared(value = seq_values, name="seq", borrow=True)
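        # scan iterates jointly over the hop index, the matching row of W and the
        # corresponding power of the adjacency matrix, emitting one convolved
        # feature block per hop.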
out, _ = theano.scan(fn=compute_output,
non_sequences=[X, self.n_hops],
sequences=[seq, self.W, Apow],
n_steps = self.n_hops)
return self.nonlinearity(out.transpose())
def get_output_shape_for(self, input_shapes):
print (input_shapes[1][0], self.n_hops, self.n_features)
return (input_shapes[1][0], self.n_hops, self.n_features)
class DeepSearchConvolution(layers.Layer):
"""
A search-convolutional Lasagne layer.
"""
def __init__(self, incoming, n_hops, n_features,
W=lasagne.init.Normal(0.01),
nonlinearity=lasagne.nonlinearities.tanh,
**kwargs):
super(DeepSearchConvolution, self).__init__(incoming, **kwargs)
self.W = T.addbroadcast(self.add_param(W, (1,n_features,n_hops), name='W'),0)
self.n_hops = n_hops
self.n_features = n_features
self.nonlinearity = nonlinearity
def get_output_for(self, input, **kwargs):
return self.nonlinearity(self.W * input)
def get_output_shape_for(self, input_shape):
print input_shape
return input_shape
# This class is user-facing. It contains a full SCNN model.
class SCNN:
"""
The search-convolutional neural network model.
"""
def __init__(self, n_hops=2, transform_fn=util.rw_laplacian):
self.n_hops = n_hops
self.transform_fn = transform_fn
# Initialize Theano variables
self.var_A = T.matrix('A')
self.var_Apow = T.tensor3('Apow')
self.var_X = T.matrix('X')
self.var_Y = T.imatrix('Y')
def _register_layers(self, batch_size, n_nodes, n_features, n_classes):
self.l_in_apow = lasagne.layers.InputLayer((self.n_hops + 1, batch_size, n_nodes), input_var=self.var_Apow)
self.l_in_x = lasagne.layers.InputLayer((n_nodes, n_features), input_var=self.var_X)
self.l_sc = SearchConvolution([self.l_in_apow, self.l_in_x], self.n_hops + 1, n_features)
self.l_out = layers.DenseLayer(self.l_sc, num_units=n_classes, nonlinearity=lasagne.nonlinearities.tanh)
def _get_output_layer(self):
return self.l_out
def fit(self, A, X, Y, train_indices, valid_indices,
learning_rate=0.05, batch_size=100, n_epochs=100,
loss_fn=lasagne.objectives.multiclass_hinge_loss,
update_fn=lasagne.updates.adagrad,
stop_early=True,
stop_window_size=5,
output_weights=False,
show_weights=False):
# Ensure that data have the correct dimensions
assert A.shape[0] == X.shape[0]
assert X.shape[0] == Y.shape[0]
assert len(Y.shape) > 1
if self.transform_fn is not None:
A = self.transform_fn(A)
# Extract dimensions
n_nodes = A.shape[0]
n_features = X.shape[1] + 1
n_classes = Y.shape[1]
n_batch = n_nodes // batch_size
# Compute the matrix power series
Apow = util.A_power_series(A, self.n_hops)
self.Apow = Apow
# Add bias term to X
X = np.hstack([X, np.ones((X.shape[0],1))]).astype('float32')
# Create Lasagne layers
self._register_layers(batch_size, n_nodes, n_features, n_classes)
# Create symbolic representations of predictions, loss, parameters, and updates.
prediction = layers.get_output(self._get_output_layer())
loss = lasagne.objectives.aggregate(loss_fn(prediction, self.var_Y), mode='mean')
params = lasagne.layers.get_all_params(self._get_output_layer())
updates = update_fn(loss, params, learning_rate=learning_rate)
# Create functions that apply the model to data and return loss
apply_loss = theano.function([self.var_Apow, self.var_X, self.var_Y],
loss, updates=updates)
# Train the model
print 'Training model...'
validation_losses = []
validation_loss_window = np.zeros(stop_window_size)
validation_loss_window[:] = float('+inf')
for epoch in range(n_epochs):
train_loss = 0.0
np.random.shuffle(train_indices)
for batch in range(n_batch):
start = batch * batch_size
end = min((batch + 1) * batch_size, train_indices.shape[0])
if start < end:
train_loss += apply_loss(Apow[:,train_indices[start:end],:],
X,
Y[train_indices[start:end],:])
valid_loss = apply_loss(Apow[:,valid_indices,:],
X,
Y[valid_indices,:])
print "Epoch %d training error: %.6f" % (epoch, train_loss)
print "Epoch %d validation error: %.6f" % (epoch, valid_loss)
validation_losses.append(valid_loss)
if output_weights:
W = layers.get_all_param_values(self.l_sc)[0]
np.savetxt('W_%d.csv' % (epoch,), W, delimiter=',')
if show_weights:
W = layers.get_all_param_values(self.l_sc)[0]
plt.imshow(W, aspect='auto', interpolation='none')
plt.show()
if stop_early:
if valid_loss >= validation_loss_window.mean():
print 'Validation loss did not decrease. Stopping early.'
break
validation_loss_window[epoch % stop_window_size] = valid_loss
def predict(self, X, test_indices, A=None):
if A is None:
Apow = self.Apow
else:
if self.transform_fn is not None:
A = self.transform_fn(A)
# Compute the matrix power series
Apow = util.A_power_series(A, self.n_hops)
# add bias term to X
X = np.hstack([X, np.ones((X.shape[0],1))]).astype('float32')
# Create symbolic representation of predictions
pred = layers.get_output(self.l_out)
# Create a function that applies the model to data to predict a class
pred_fn = theano.function([self.var_Apow, self.var_X], T.argmax(pred, axis=1), allow_input_downcast=True)
# Return the predictions
predictions = pred_fn(Apow[:,test_indices,:], X)
return predictions
def predict_proba(self, X, test_indices, A=None):
if A is None:
Apow = self.Apow
else:
if self.transform_fn is not None:
A = self.transform_fn(A)
# Compute the matrix power series
Apow = util.A_power_series(A, self.n_hops)
# add bias term to X
X = np.hstack([X, np.ones((X.shape[0],1))]).astype('float32')
# Create symbolic representation of predictions
pred = layers.get_output(self.l_out)
# Create a function that applies the model to data to predict a class
pred_fn = theano.function([self.var_Apow, self.var_X], T.exp(pred) / T.exp(pred).sum(axis=1,keepdims=True), allow_input_downcast=True)
# Return the predictions
predictions = pred_fn(Apow[:,test_indices,:], X)
return predictions
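# A minimal usage sketch (not part of the original module; the adjacency matrix
# A, feature matrix X, one-hot label matrix Y and the index arrays are assumed
# to be prepared by the caller):
#
#   scnn = SCNN(n_hops=2)
#   scnn.fit(A, X, Y, train_indices, valid_indices, n_epochs=50)
#   test_predictions = scnn.predict(X, test_indices)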
class DeepSCNN(SCNN):
def __init__(self, n_hops=2, n_layers=4, transform_fn=util.rw_laplacian):
self.n_hops = n_hops
self.n_layers = n_layers
self.transform_fn = transform_fn
# Initialize Theano variables
self.var_A = T.matrix('A')
self.var_Apow = T.tensor3('Apow')
self.var_X = T.matrix('X')
self.var_Y = T.imatrix('Y')
def _register_layers(self, batch_size, n_nodes, n_features, n_classes):
self.l_in_apow = lasagne.layers.InputLayer((self.n_hops + 1, batch_size, n_nodes), input_var=self.var_Apow)
self.l_in_x = lasagne.layers.InputLayer((n_nodes, n_features), input_var=self.var_X)
self.l_sc = SearchConvolution([self.l_in_apow, self.l_in_x], self.n_hops + 1, n_features)
self.l_deep = self.l_sc
for i in range(self.n_layers):
self.l_deep = DeepSearchConvolution(self.l_deep, n_hops=self.n_hops + 1, n_features=n_features)
self.l_out = layers.DenseLayer(self.l_deep, num_units=n_classes, nonlinearity=lasagne.nonlinearities.tanh)
class DeepFeedForwardSCNN(SCNN):
def __init__(self, n_hops=2, n_layers=4, transform_fn=util.rw_laplacian):
self.n_hops = n_hops
self.n_layers = n_layers
self.transform_fn = transform_fn
# Initialize Theano variables
self.var_A = T.matrix('A')
self.var_Apow = T.tensor3('Apow')
self.var_X = T.matrix('X')
self.var_Y = T.imatrix('Y')
def _register_layers(self, batch_size, n_nodes, n_features, n_classes):
self.l_in_apow = lasagne.layers.InputLayer((self.n_hops + 1, batch_size, n_nodes), input_var=self.var_Apow)
self.l_in_x = lasagne.layers.InputLayer((n_nodes, n_features), input_var=self.var_X)
self.l_sc = SearchConvolution([self.l_in_apow, self.l_in_x], self.n_hops + 1, n_features)
self.l_deep = self.l_sc
for i in range(self.n_layers):
self.l_deep = layers.DenseLayer(self.l_deep, num_units=n_classes, nonlinearity=lasagne.nonlinearities.rectify)
self.l_out = layers.DenseLayer(self.l_deep, num_units=n_classes, nonlinearity=lasagne.nonlinearities.tanh)
| mit |
nikhilnrng/german-credit-risk | src/main.py | 1 | 1468 | import model
import preprocessing
from defines import Metadata
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
def main():
metadata = Metadata()
data, labels = preprocessing.load(metadata)
data = preprocessing.encode(data, metadata.COLUMNS)
# divide data into training and test sets
x_train, x_test, y_train, y_test = train_test_split(
data, labels, test_size=0.2) #, random_state=33)
    # run the classifiers
clf_base = model.baseline_classifier(x_train, y_train)
clf_nb = model.naive_bayes_classifier(x_train, y_train, metadata.COLUMNS)
clf_knn = model.knn_classifier(x_train, y_train, metadata.COLUMNS)
clf_svm = model.svm_classifier(x_train, y_train, metadata.COLUMNS)
    # select the best-scoring classifier
clf = [(clf[1].best_score_, clf) for clf in [('base', clf_base),
('knn', clf_knn),
('svm', clf_svm),
('nb', clf_nb)]]
name, clf = max(clf, key=lambda x: x[0])[1]
# predict test set
y_pred = clf.predict(x_test)
print 'Best classifier: %s' % name
print '\taccuracy: %0.3f\n' % accuracy_score(y_test, y_pred)
print classification_report(y_test, y_pred)
if __name__ == '__main__':
main()
| mit |
dmsuehir/spark-tk | regression-tests/sparktkregtests/testcases/scoretests/gmm_test.py | 12 | 2125 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests GMM scoring engine """
import unittest
import os
from sparktkregtests.lib import sparktk_test
from sparktkregtests.lib import scoring_utils
class GMM(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(GMM, self).setUp()
data_file = self.get_file("gmm_data.csv")
self.frame = self.context.frame.import_csv(
data_file, schema=[("x1", float), ("x2", float)])
def test_model_scoring(self):
"""Test publishing a gmm model"""
model = self.context.models.clustering.gmm.train(
self.frame, ["x1", "x2"],
column_scalings=[1.0, 1.0],
k=5,
max_iterations=500,
seed=20,
convergence_tol=0.0001)
predict = model.predict(self.frame)
test_rows = predict.to_pandas(predict.count())
file_name = self.get_name("gmm")
model_path = model.export_to_mar(self.get_export_file(file_name))
with scoring_utils.scorer(
model_path, self.id()) as scorer:
for i, row in test_rows.iterrows():
res = scorer.score(
[dict(zip(["x1", "x2"], list(row[0:2])))])
self.assertEqual(
row["predicted_cluster"], res.json()["data"][0]['Score'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
pelson/cartopy | lib/cartopy/tests/mpl/test_mpl_integration.py | 2 | 22762 | # (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import math
import re
import warnings
import numpy as np
import matplotlib.pyplot as plt
import pytest
import six
import cartopy.crs as ccrs
from cartopy.tests.mpl import MPL_VERSION, ImageTesting
_ROB_TOL = 0.5 if ccrs.PROJ4_VERSION < (4, 9) else 0.111
_CONTOUR_STYLE = _STREAMPLOT_STYLE = 'classic'
if MPL_VERSION >= '3.0.0':
_CONTOUR_IMAGE = 'global_contour_wrap'
_CONTOUR_STYLE = 'mpl20'
_STREAMPLOT_IMAGE = 'streamplot_mpl_3.0.0'
# Should have been the case for anything but _1.4.3, but we don't want to
# regenerate those images again.
_STREAMPLOT_STYLE = 'mpl20'
else:
_CONTOUR_IMAGE = 'global_contour_wrap_mpl_pre_3.0.0'
if MPL_VERSION >= '2.1.0':
_STREAMPLOT_IMAGE = 'streamplot_mpl_2.1.0'
elif MPL_VERSION >= '2':
_STREAMPLOT_IMAGE = 'streamplot_mpl_2.0.0'
else:
_STREAMPLOT_IMAGE = 'streamplot_mpl_1.4.3'
@pytest.mark.natural_earth
@ImageTesting([_CONTOUR_IMAGE], style=_CONTOUR_STYLE)
def test_global_contour_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contour(x, y, data, transform=ccrs.PlateCarree())
@pytest.mark.natural_earth
@ImageTesting([_CONTOUR_IMAGE], style=_CONTOUR_STYLE)
def test_global_contour_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contour(x, y, data)
@pytest.mark.natural_earth
@ImageTesting(['global_contourf_wrap'])
def test_global_contourf_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contourf(x, y, data, transform=ccrs.PlateCarree())
@pytest.mark.natural_earth
@ImageTesting(['global_contourf_wrap'])
def test_global_contourf_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contourf(x, y, data)
@pytest.mark.natural_earth
@ImageTesting(['global_pcolor_wrap'])
def test_global_pcolor_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.pcolor(x, y, data, transform=ccrs.PlateCarree())
@pytest.mark.natural_earth
@ImageTesting(['global_pcolor_wrap'])
def test_global_pcolor_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.pcolor(x, y, data)
@pytest.mark.natural_earth
@ImageTesting(['global_scatter_wrap'])
def test_global_scatter_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
# By default the coastline feature will be drawn after patches.
# By setting zorder we can ensure our scatter points are drawn
# after the coastlines.
ax.coastlines(zorder=0)
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.scatter(x, y, c=data, transform=ccrs.PlateCarree())
@pytest.mark.natural_earth
@ImageTesting(['global_scatter_wrap'])
def test_global_scatter_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines(zorder=0)
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.scatter(x, y, c=data)
@ImageTesting(['global_map'],
tolerance=16 if ccrs.PROJ4_VERSION < (4, 9) else 0.1)
def test_global_map():
plt.axes(projection=ccrs.Robinson())
# ax.coastlines()
# ax.gridlines(5)
plt.plot(-0.08, 51.53, 'o', transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='red',
transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='blue',
transform=ccrs.Geodetic())
@pytest.mark.natural_earth
@ImageTesting(['simple_global'])
def test_simple_global():
plt.axes(projection=ccrs.PlateCarree())
plt.gca().coastlines()
# produces a global map, despite not having needed to set the limits
@pytest.mark.natural_earth
@ImageTesting(['multiple_projections4' if ccrs.PROJ4_VERSION < (5, 0, 0)
else 'multiple_projections5'])
def test_multiple_projections():
projections = [ccrs.PlateCarree(),
ccrs.Robinson(),
ccrs.RotatedPole(pole_latitude=45, pole_longitude=180),
ccrs.OSGB(),
ccrs.TransverseMercator(),
ccrs.Mercator(
globe=ccrs.Globe(semimajor_axis=math.degrees(1)),
min_latitude=-85., max_latitude=85.),
ccrs.LambertCylindrical(),
ccrs.Miller(),
ccrs.Gnomonic(),
ccrs.Stereographic(),
ccrs.NorthPolarStereo(),
ccrs.SouthPolarStereo(),
ccrs.Orthographic(),
ccrs.Mollweide(),
ccrs.InterruptedGoodeHomolosine(),
ccrs.EckertI(),
ccrs.EckertII(),
ccrs.EckertIII(),
ccrs.EckertIV(),
ccrs.EckertV(),
ccrs.EckertVI(),
]
rows = np.ceil(len(projections) / 5)
fig = plt.figure(figsize=(10, 2 * rows))
for i, prj in enumerate(projections, 1):
ax = fig.add_subplot(rows, 5, i, projection=prj)
ax.set_global()
ax.coastlines()
plt.plot(-0.08, 51.53, 'o', transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='red',
transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='blue',
transform=ccrs.Geodetic())
@pytest.mark.skipif(ccrs.PROJ4_VERSION < (5, 2, 0),
reason='Proj is too old.')
@pytest.mark.natural_earth
@ImageTesting(['multiple_projections520'])
def test_multiple_projections_520():
# Test projections added in Proj 5.2.0.
fig = plt.figure(figsize=(2, 2))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.EqualEarth())
ax.set_global()
ax.coastlines()
ax.plot(-0.08, 51.53, 'o', transform=ccrs.PlateCarree())
ax.plot([-0.08, 132], [51.53, 43.17], color='red',
transform=ccrs.PlateCarree())
ax.plot([-0.08, 132], [51.53, 43.17], color='blue',
transform=ccrs.Geodetic())
def test_cursor_values():
ax = plt.axes(projection=ccrs.NorthPolarStereo())
x, y = np.array([-969100.]), np.array([-4457000.])
r = ax.format_coord(x, y)
assert (r.encode('ascii', 'ignore') ==
six.b('-9.691e+05, -4.457e+06 (50.716617N, 12.267069W)'))
ax = plt.axes(projection=ccrs.PlateCarree())
x, y = np.array([-181.5]), np.array([50.])
r = ax.format_coord(x, y)
assert (r.encode('ascii', 'ignore') ==
six.b('-181.5, 50 (50.000000N, 178.500000E)'))
ax = plt.axes(projection=ccrs.Robinson())
x, y = np.array([16060595.2]), np.array([2363093.4])
r = ax.format_coord(x, y)
assert re.search(six.b('1.606e\\+07, 2.363e\\+06 '
'\\(22.09[0-9]{4}N, 173.70[0-9]{4}E\\)'),
r.encode('ascii', 'ignore'))
plt.close()
@pytest.mark.natural_earth
@ImageTesting(['natural_earth_interface'], tolerance=_ROB_TOL)
def test_axes_natural_earth_interface():
rob = ccrs.Robinson()
ax = plt.axes(projection=rob)
with warnings.catch_warnings(record=True) as all_warnings:
warnings.simplefilter('always')
ax.natural_earth_shp('rivers_lake_centerlines', edgecolor='black',
facecolor='none')
ax.natural_earth_shp('lakes', facecolor='blue')
assert len(all_warnings) == 2
for warning in all_warnings:
msg = str(warning.message)
assert 'deprecated' in msg
assert 'add_feature' in msg
@pytest.mark.natural_earth
@ImageTesting(['pcolormesh_global_wrap1'])
def test_pcolormesh_global_with_wrap1():
# make up some realistic data with bounds (such as data from the UM)
nx, ny = 36, 18
xbnds = np.linspace(0, 360, nx, endpoint=True)
ybnds = np.linspace(-90, 90, ny, endpoint=True)
x, y = np.meshgrid(xbnds, ybnds)
data = np.exp(np.sin(np.deg2rad(x)) + np.cos(np.deg2rad(y)))
data = data[:-1, :-1]
ax = plt.subplot(211, projection=ccrs.PlateCarree())
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(212, projection=ccrs.PlateCarree(180))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
@pytest.mark.natural_earth
@ImageTesting(
['pcolormesh_global_wrap2'],
tolerance=1.8 if (5, 0, 0) <= ccrs.PROJ4_VERSION < (5, 1, 0) else 0.5)
def test_pcolormesh_global_with_wrap2():
# make up some realistic data with bounds (such as data from the UM)
nx, ny = 36, 18
xbnds, xstep = np.linspace(0, 360, nx - 1, retstep=True, endpoint=True)
ybnds, ystep = np.linspace(-90, 90, ny - 1, retstep=True, endpoint=True)
xbnds -= xstep / 2
ybnds -= ystep / 2
xbnds = np.append(xbnds, xbnds[-1] + xstep)
ybnds = np.append(ybnds, ybnds[-1] + ystep)
x, y = np.meshgrid(xbnds, ybnds)
data = np.exp(np.sin(np.deg2rad(x)) + np.cos(np.deg2rad(y)))
data = data[:-1, :-1]
ax = plt.subplot(211, projection=ccrs.PlateCarree())
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(212, projection=ccrs.PlateCarree(180))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
@pytest.mark.natural_earth
@ImageTesting(
['pcolormesh_global_wrap3'],
tolerance=2.4 if (5, 0, 0) <= ccrs.PROJ4_VERSION < (5, 1, 0) else _ROB_TOL)
def test_pcolormesh_global_with_wrap3():
nx, ny = 33, 17
xbnds = np.linspace(-1.875, 358.125, nx, endpoint=True)
ybnds = np.linspace(91.25, -91.25, ny, endpoint=True)
xbnds, ybnds = np.meshgrid(xbnds, ybnds)
data = np.exp(np.sin(np.deg2rad(xbnds)) + np.cos(np.deg2rad(ybnds)))
# this step is not necessary, but makes the plot even harder to do (i.e.
# it really puts cartopy through its paces)
ybnds = np.append(ybnds, ybnds[:, 1:2], axis=1)
xbnds = np.append(xbnds, xbnds[:, 1:2] + 360, axis=1)
data = np.ma.concatenate([data, data[:, 0:1]], axis=1)
data = data[:-1, :-1]
data = np.ma.masked_greater(data, 2.6)
ax = plt.subplot(311, projection=ccrs.PlateCarree(-45))
c = plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
assert c._wrapped_collection_fix is not None, \
'No pcolormesh wrapping was done when it should have been.'
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(312, projection=ccrs.PlateCarree(-1.87499952))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(313, projection=ccrs.Robinson(-2))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
@pytest.mark.natural_earth
@ImageTesting(['pcolormesh_limited_area_wrap'],
tolerance=1.41 if MPL_VERSION >= '2.1.0' else 0.7)
def test_pcolormesh_limited_area_wrap():
# make up some realistic data with bounds (such as data from the UM's North
# Atlantic Europe model)
nx, ny = 22, 36
xbnds = np.linspace(311.91998291, 391.11999512, nx, endpoint=True)
ybnds = np.linspace(-23.59000015, 24.81000137, ny, endpoint=True)
x, y = np.meshgrid(xbnds, ybnds)
data = ((np.sin(np.deg2rad(x))) / 10. + np.exp(np.cos(np.deg2rad(y))))
data = data[:-1, :-1]
rp = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
plt.figure(figsize=(10, 6))
ax = plt.subplot(221, projection=ccrs.PlateCarree())
plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Spectral')
ax.coastlines()
ax = plt.subplot(222, projection=ccrs.PlateCarree(180))
plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Spectral')
ax.coastlines()
ax.set_global()
# draw the same plot, only more zoomed in, and using the 2d versions
# of the coordinates (just to test that 1d and 2d are both suitably
# being fixed)
ax = plt.subplot(223, projection=ccrs.PlateCarree())
plt.pcolormesh(x, y, data, transform=rp, cmap='Spectral')
ax.coastlines()
ax.set_extent([-70, 0, 0, 80])
ax = plt.subplot(224, projection=rp)
plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Spectral')
ax.coastlines()
@pytest.mark.natural_earth
@ImageTesting(['pcolormesh_single_column_wrap'], tolerance=0.7)
def test_pcolormesh_single_column_wrap():
# Check a wrapped mesh like test_pcolormesh_limited_area_wrap, but only use
# a single column, which could break depending on how wrapping is
# determined.
ny = 36
xbnds = np.array([360.9485619, 364.71999105])
ybnds = np.linspace(-23.59000015, 24.81000137, ny, endpoint=True)
x, y = np.meshgrid(xbnds, ybnds)
data = ((np.sin(np.deg2rad(x))) / 10. + np.exp(np.cos(np.deg2rad(y))))
data = data[:-1, :-1]
rp = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
plt.figure(figsize=(10, 6))
ax = plt.subplot(111, projection=ccrs.PlateCarree(180))
plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Spectral')
ax.coastlines()
ax.set_global()
@pytest.mark.natural_earth
@ImageTesting(['pcolormesh_goode_wrap'])
def test_pcolormesh_goode_wrap():
# global data on an Interrupted Goode Homolosine projection
# shouldn't spill outside projection boundary
x = np.linspace(0, 360, 73)
y = np.linspace(-87.5, 87.5, 36)
X, Y = np.meshgrid(*[np.deg2rad(c) for c in (x, y)])
Z = np.cos(Y) + 0.375 * np.sin(2. * X)
Z = Z[:-1, :-1]
ax = plt.axes(projection=ccrs.InterruptedGoodeHomolosine())
ax.coastlines()
ax.pcolormesh(x, y, Z, transform=ccrs.PlateCarree())
@pytest.mark.natural_earth
@ImageTesting(['pcolormesh_mercator_wrap'])
def test_pcolormesh_mercator_wrap():
x = np.linspace(0, 360, 73)
y = np.linspace(-87.5, 87.5, 36)
X, Y = np.meshgrid(*[np.deg2rad(c) for c in (x, y)])
Z = np.cos(Y) + 0.375 * np.sin(2. * X)
Z = Z[:-1, :-1]
ax = plt.axes(projection=ccrs.Mercator())
ax.coastlines()
ax.pcolormesh(x, y, Z, transform=ccrs.PlateCarree())
@pytest.mark.natural_earth
@ImageTesting(['quiver_plate_carree'])
def test_quiver_plate_carree():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 6))
# plot on native projection
ax = plt.subplot(211, projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag)
# plot on a different projection
ax = plt.subplot(212, projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree())
@pytest.mark.natural_earth
@ImageTesting(['quiver_rotated_pole'])
def test_quiver_rotated_pole():
nx, ny = 22, 36
x = np.linspace(311.91998291, 391.11999512, nx, endpoint=True)
y = np.linspace(-23.59000015, 24.81000137, ny, endpoint=True)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = -2. * np.cos(2. * np.deg2rad(y2d)) * np.sin(np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
rp = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
plot_extent = [x[0], x[-1], y[0], y[-1]]
# plot on native projection
plt.figure(figsize=(6, 6))
ax = plt.subplot(211, projection=rp)
ax.set_extent(plot_extent, crs=rp)
ax.coastlines()
ax.quiver(x, y, u, v, mag)
# plot on different projection
ax = plt.subplot(212, projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=rp)
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=rp)
@pytest.mark.natural_earth
@ImageTesting(['quiver_regrid'], tolerance=1.3)
def test_quiver_regrid():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree(),
regrid_shape=30)
@pytest.mark.natural_earth
@ImageTesting(['quiver_regrid_with_extent'])
def test_quiver_regrid_with_extent():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
target_extent = [-3e6, 2e6, -6e6, -2.5e6]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree(),
regrid_shape=10, target_extent=target_extent)
@pytest.mark.natural_earth
@ImageTesting(['barbs_plate_carree'])
def test_barbs():
x = np.arange(-60, 45, 5)
y = np.arange(30, 75, 5)
x2d, y2d = np.meshgrid(x, y)
u = 40 * np.cos(np.deg2rad(y2d))
v = 40 * np.cos(2. * np.deg2rad(x2d))
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 6))
# plot on native projection
ax = plt.subplot(211, projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, length=4, linewidth=.25)
# plot on a different projection
ax = plt.subplot(212, projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, transform=ccrs.PlateCarree(), length=4, linewidth=.25)
@pytest.mark.natural_earth
@ImageTesting(['barbs_regrid'])
def test_barbs_regrid():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = 40 * np.cos(np.deg2rad(y2d))
v = 40 * np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, mag, transform=ccrs.PlateCarree(),
length=4, linewidth=.4, regrid_shape=20)
@pytest.mark.natural_earth
@ImageTesting(['barbs_regrid_with_extent'])
def test_barbs_regrid_with_extent():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = 40 * np.cos(np.deg2rad(y2d))
v = 40 * np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
target_extent = [-3e6, 2e6, -6e6, -2.5e6]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, mag, transform=ccrs.PlateCarree(),
length=4, linewidth=.25, regrid_shape=10,
target_extent=target_extent)
@pytest.mark.natural_earth
@ImageTesting(['barbs_1d'])
def test_barbs_1d():
x = np.array([20., 30., -17., 15.])
y = np.array([-1., 35., 11., 40.])
u = np.array([23., -18., 2., -11.])
v = np.array([5., -4., 19., 11.])
plot_extent = [-21, 40, -5, 45]
plt.figure(figsize=(6, 5))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, transform=ccrs.PlateCarree(),
length=8, linewidth=1, color='#7f7f7f')
@pytest.mark.natural_earth
@ImageTesting(['barbs_1d_transformed'])
def test_barbs_1d_transformed():
x = np.array([20., 30., -17., 15.])
y = np.array([-1., 35., 11., 40.])
u = np.array([23., -18., 2., -11.])
v = np.array([5., -4., 19., 11.])
plot_extent = [-20, 31, -5, 45]
plt.figure(figsize=(6, 5))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, transform=ccrs.PlateCarree(),
length=8, linewidth=1, color='#7f7f7f')
@pytest.mark.natural_earth
@ImageTesting([_STREAMPLOT_IMAGE], style=_STREAMPLOT_STYLE)
def test_streamplot():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.streamplot(x, y, u, v, transform=ccrs.PlateCarree(),
density=(1.5, 2), color=mag, linewidth=2*mag)
| lgpl-3.0 |
wlamond/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves int the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
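    # Averaging the squared difference over every pair of a noise realisation
    # (index j) and a model fit on one training set (index i) estimates the
    # expected error at each test point.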
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | doc/en/datasets/mldata_fixture.py | 367 | 1183 | """Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
| gpl-3.0 |
startcode/apollo | modules/tools/mapshow/subplot_traj_acc.py | 2 | 3050 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import matplotlib.pyplot as plt
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
class TrajAccSubplot:
def __init__(self, ax):
self.ax = ax
self.acc_lines = []
self.acc_lines_size = 30
self.colors = []
self.init_colors()
#self.colors = ['b','r', 'y', 'k']
for i in range(self.acc_lines_size):
line, = ax.plot(
[0], [0],
c=self.colors[i % len(self.colors)],
ls="-",
marker='',
lw=3,
alpha=0.8)
self.acc_lines.append(line)
ax.set_xlabel("t (second)")
#ax.set_xlim([-2, 10])
ax.set_ylim([-6, 6])
self.ax.autoscale_view()
#self.ax.relim()
ax.set_ylabel("acc (m/s^2)")
ax.set_title("PLANNING ACC")
self.set_visible(False)
def init_colors(self):
self.colors = []
values = range(self.acc_lines_size)
jet = plt.get_cmap('brg')
color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
for val in values:
color_val = scalar_map.to_rgba(val)
self.colors.append(color_val)
def set_visible(self, visible):
for line in self.acc_lines:
line.set_visible(visible)
def show(self, planning):
planning.traj_data_lock.acquire()
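        # Update the pre-allocated line artists in place rather than creating
        # new ones on every refresh.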
for i in range(len(planning.traj_speed_t_history)):
if i >= self.acc_lines_size:
print "WARNING: number of path lines is more than " \
+ str(self.acc_lines_size)
continue
speed_line = self.acc_lines[self.acc_lines_size-i-1]
speed_line.set_xdata(planning.traj_acc_t_history[i])
speed_line.set_ydata(planning.traj_acc_a_history[i])
#speed_line.set_xdata([1,2,3,4])
#speed_line.set_ydata([1,2,3,4])
#speed_line.set_label(name[0:5])
speed_line.set_visible(True)
#self.ax.legend(loc="upper left", borderaxespad=0., ncol=5)
#self.ax.axis('equal')
planning.traj_data_lock.release()
self.ax.autoscale_view()
self.ax.relim() | apache-2.0 |
scipy/scipy | scipy/special/_precompute/lambertw.py | 12 | 2001 | """Compute a Pade approximation for the principal branch of the
Lambert W function around 0 and compare it to various other
approximations.
"""
import numpy as np
try:
import mpmath
import matplotlib.pyplot as plt # type: ignore[import]
except ImportError:
pass
def lambertw_pade():
derivs = [mpmath.diff(mpmath.lambertw, 0, n=n) for n in range(6)]
p, q = mpmath.pade(derivs, 3, 2)
return p, q
def main():
print(__doc__)
with mpmath.workdps(50):
p, q = lambertw_pade()
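        # np.polyval expects coefficients ordered from highest degree to lowest,
        # so the Pade coefficients are reversed before being evaluated below.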
p, q = p[::-1], q[::-1]
print("p = {}".format(p))
print("q = {}".format(q))
x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)
x, y = np.meshgrid(x, y)
z = x + 1j*y
lambertw_std = []
for z0 in z.flatten():
lambertw_std.append(complex(mpmath.lambertw(z0)))
lambertw_std = np.array(lambertw_std).reshape(x.shape)
fig, axes = plt.subplots(nrows=3, ncols=1)
# Compare Pade approximation to true result
p = np.array([float(p0) for p0 in p])
q = np.array([float(q0) for q0 in q])
pade_approx = np.polyval(p, z)/np.polyval(q, z)
pade_err = abs(pade_approx - lambertw_std)
axes[0].pcolormesh(x, y, pade_err)
# Compare two terms of asymptotic series to true result
asy_approx = np.log(z) - np.log(np.log(z))
asy_err = abs(asy_approx - lambertw_std)
axes[1].pcolormesh(x, y, asy_err)
# Compare two terms of the series around the branch point to the
# true result
p = np.sqrt(2*(np.exp(1)*z + 1))
series_approx = -1 + p - p**2/3
series_err = abs(series_approx - lambertw_std)
im = axes[2].pcolormesh(x, y, series_err)
fig.colorbar(im, ax=axes.ravel().tolist())
plt.show()
fig, ax = plt.subplots(nrows=1, ncols=1)
pade_better = pade_err < asy_err
im = ax.pcolormesh(x, y, pade_better)
t = np.linspace(-0.3, 0.3)
ax.plot(-2.5*abs(t) - 0.2, t, 'r')
fig.colorbar(im, ax=ax)
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
raysinensis/tcgaAPP | backup/createdb.py | 2 | 1660 | from sqlalchemy import create_engine,MetaData
from sqlalchemy.ext.automap import automap_base
import pandas as pd
##csv to sql
engine = create_engine('sqlite:///static/database/methyl.db')
df = pd.read_csv('./static/methylation db.csv')
df.to_sql(name='methyl', con=engine)
##query from db
metadata = MetaData(engine)
Base = automap_base()
Base.prepare(engine, reflect=True)
#engine.table_names() ##checking table names
#methyldb = Table('methyl',metadata, autoload=True)
#print methyldb.c ##to see column names
from sqlalchemy import text
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///static/database/methyl.db')
Session = sessionmaker(bind=engine)
methyldb = Session()
gene = 'ZFP36L1'
qcol = ['BRCA','COAD','GBM','KICH','LUAD','PAAD','SARC','STAD']
qcolstr = ','.join(qcol)
sqlstr = 'select '+qcolstr+ ' from methyl where gene=\"'+gene+'\"'
sqlcmd = text(sqlstr)
result = methyldb.execute(sqlcmd).fetchall()
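# NOTE: the statement above is assembled by string concatenation for brevity;
# with untrusted gene names a bound parameter, e.g.
#   methyldb.execute(text('select ... from methyl where gene=:g'), {'g': gene})
# would be the safer choice.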
##cox coeff from xlsx
trying=pd.read_excel('/home/rf/Downloads/peerj-03-1499-s001.xlsx',sheetname=None )
tempgenes=[]
for cancer in trying.keys():
tempgenes.extend((trying[cancer])['Gene Name'].tolist())
dfcox=pd.DataFrame()
dfcox['Gene Name']=list(set(tempgenes))
for cancer in trying.keys():
tempdf=(trying[cancer])[['Gene Name','Raw Cox Coefficient']].drop_duplicates('Gene Name')
dfcox=dfcox.merge(tempdf,on='Gene Name',how='left')
dfcox.rename(columns={'Raw Cox Coefficient':cancer},inplace=True)
dfcox.rename(columns={'Gene Name':'Gene'},inplace=True)
engine = create_engine('sqlite:///static/database/methyl.db')
dfcox.to_sql(name='cox', con=engine)
| mit |
tony810430/flink | flink-python/setup.py | 2 | 14665 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import io
import os
import platform
import sys
from distutils.command.build_ext import build_ext
from shutil import copytree, copy, rmtree
from setuptools import setup, Extension
if sys.version_info < (3, 6):
print("Python versions prior to 3.6 are not supported for PyFlink.",
file=sys.stderr)
sys.exit(-1)
def remove_if_exists(file_path):
if os.path.exists(file_path):
if os.path.islink(file_path) or os.path.isfile(file_path):
os.remove(file_path)
else:
assert os.path.isdir(file_path)
rmtree(file_path)
def copy_files(src_paths, output_directory):
for src_path, file_mode in src_paths:
if os.path.isdir(src_path):
child_files = os.listdir(src_path)
for child_file in child_files:
dst_path = copy(os.path.join(src_path, child_file), output_directory)
os.chmod(dst_path, file_mode)
else:
dst_path = copy(src_path, os.path.join(output_directory, os.path.basename(src_path)))
os.chmod(dst_path, file_mode)
def has_unsupported_tag(file_element):
    unsupported_tags = ['includes', 'excludes']
for unsupported_tag in unsupported_tags:
if file_element.getElementsByTagName(unsupported_tag):
print('Unsupported <{0}></{1}> tag'.format(unsupported_tag, unsupported_tag))
return True
return False
def extracted_output_files(base_dir, file_path, output_directory):
extracted_file_paths = []
from xml.dom.minidom import parse
dom = parse(file_path)
root_data = dom.documentElement
file_elements = (root_data.getElementsByTagName("files")[0]).getElementsByTagName("file")
# extracted <files><file></file></files>
for file_element in file_elements:
source = ((file_element.getElementsByTagName('source')[0]).childNodes[0]).data
file_mode = int(((file_element.getElementsByTagName('fileMode')[0]).childNodes[0]).data, 8)
try:
dst = ((file_element.getElementsByTagName('outputDirectory')[0]).childNodes[0]).data
if dst == output_directory:
if has_unsupported_tag(file_element):
sys.exit(-1)
extracted_file_paths.append((os.path.join(base_dir, source), file_mode))
except IndexError:
pass
# extracted <fileSets><fileSet></fileSet></fileSets>
file_elements = (root_data.getElementsByTagName("fileSets")[0]).getElementsByTagName("fileSet")
for file_element in file_elements:
source = ((file_element.getElementsByTagName('directory')[0]).childNodes[0]).data
file_mode = int(((file_element.getElementsByTagName('fileMode')[0]).childNodes[0]).data, 8)
try:
dst = ((file_element.getElementsByTagName('outputDirectory')[0]).childNodes[0]).data
if dst == output_directory:
if has_unsupported_tag(file_element):
sys.exit(-1)
extracted_file_paths.append((os.path.join(base_dir, source), file_mode))
except IndexError:
pass
return extracted_file_paths
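# For reference, the assembly-descriptor entries parsed above look roughly like
# the following (illustrative paths only, not copied from flink-dist):
#
#   <files>
#     <file>
#       <source>src/main/flink-bin/bin/config.sh</source>
#       <outputDirectory>bin</outputDirectory>
#       <fileMode>0755</fileMode>
#     </file>
#   </files>
#   <fileSets>
#     <fileSet>
#       <directory>src/main/flink-bin/conf</directory>
#       <outputDirectory>conf</outputDirectory>
#       <fileMode>0644</fileMode>
#     </fileSet>
#   </fileSets>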
# Currently Cython optimizing doesn't support Windows.
if platform.system() == 'Windows':
extensions = ([])
else:
try:
from Cython.Build import cythonize
extensions = cythonize([
Extension(
name="pyflink.fn_execution.coder_impl_fast",
sources=["pyflink/fn_execution/coder_impl_fast.pyx"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.table.aggregate_fast",
sources=["pyflink/fn_execution/table/aggregate_fast.pyx"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.table.window_aggregate_fast",
sources=["pyflink/fn_execution/table/window_aggregate_fast.pyx"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.stream_fast",
sources=["pyflink/fn_execution/stream_fast.pyx"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.beam.beam_stream",
sources=["pyflink/fn_execution/beam/beam_stream.pyx"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_coder_impl_fast",
sources=["pyflink/fn_execution/beam/beam_coder_impl_fast.pyx"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_operations_fast",
sources=["pyflink/fn_execution/beam/beam_operations_fast.pyx"],
include_dirs=["pyflink/fn_execution/beam"]),
])
except ImportError:
if os.path.exists("pyflink/fn_execution/coder_impl_fast.c"):
extensions = ([
Extension(
name="pyflink.fn_execution.coder_impl_fast",
sources=["pyflink/fn_execution/coder_impl_fast.c"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.table.aggregate_fast",
sources=["pyflink/fn_execution/table/aggregate_fast.c"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.table.window_aggregate_fast",
sources=["pyflink/fn_execution/table/window_aggregate_fast.c"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.stream_fast",
sources=["pyflink/fn_execution/stream_fast.c"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.beam.beam_stream",
sources=["pyflink/fn_execution/beam/beam_stream.c"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_coder_impl_fast",
sources=["pyflink/fn_execution/beam/beam_coder_impl_fast.c"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_operations_fast",
sources=["pyflink/fn_execution/beam/beam_operations_fast.c"],
include_dirs=["pyflink/fn_execution/beam"]),
])
else:
extensions = ([])
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'pyflink/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load PyFlink version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
TEMP_PATH = "deps"
CONF_TEMP_PATH = os.path.join(TEMP_PATH, "conf")
LOG_TEMP_PATH = os.path.join(TEMP_PATH, "log")
EXAMPLES_TEMP_PATH = os.path.join(TEMP_PATH, "examples")
SCRIPTS_TEMP_PATH = os.path.join(TEMP_PATH, "bin")
LICENSE_FILE_TEMP_PATH = os.path.join(this_directory, "LICENSE")
README_FILE_TEMP_PATH = os.path.join("pyflink", "README.txt")
PYFLINK_UDF_RUNNER_SH = "pyflink-udf-runner.sh"
PYFLINK_UDF_RUNNER_BAT = "pyflink-udf-runner.bat"
in_flink_source = os.path.isfile("../flink-java/src/main/java/org/apache/flink/api/java/"
"ExecutionEnvironment.java")
try:
if in_flink_source:
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
flink_version = VERSION.replace(".dev0", "-SNAPSHOT")
FLINK_HOME = os.path.abspath(
"../flink-dist/target/flink-%s-bin/flink-%s" % (flink_version, flink_version))
FLINK_ROOT = os.path.abspath("..")
FLINK_DIST = os.path.join(FLINK_ROOT, "flink-dist")
FLINK_BIN = os.path.join(FLINK_DIST, "src/main/flink-bin")
EXAMPLES_PATH = os.path.join(this_directory, "pyflink/table/examples")
LICENSE_FILE_PATH = os.path.join(FLINK_ROOT, "LICENSE")
README_FILE_PATH = os.path.join(FLINK_BIN, "README.txt")
FLINK_BIN_XML_FILE = os.path.join(FLINK_BIN, '../assemblies/bin.xml')
# copy conf files
os.mkdir(CONF_TEMP_PATH)
conf_paths = extracted_output_files(FLINK_DIST, FLINK_BIN_XML_FILE, 'conf')
copy_files(conf_paths, CONF_TEMP_PATH)
# copy bin files
os.mkdir(SCRIPTS_TEMP_PATH)
script_paths = extracted_output_files(FLINK_DIST, FLINK_BIN_XML_FILE, 'bin')
copy_files(script_paths, SCRIPTS_TEMP_PATH)
copy(os.path.join(this_directory, "pyflink", "bin", PYFLINK_UDF_RUNNER_SH),
os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_SH))
copy(os.path.join(this_directory, "pyflink", "bin", PYFLINK_UDF_RUNNER_BAT),
os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_BAT))
try:
os.symlink(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
os.symlink(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
os.symlink(README_FILE_PATH, README_FILE_TEMP_PATH)
except BaseException: # pylint: disable=broad-except
copytree(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
copy(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
copy(README_FILE_PATH, README_FILE_TEMP_PATH)
os.mkdir(LOG_TEMP_PATH)
with open(os.path.join(LOG_TEMP_PATH, "empty.txt"), 'w') as f:
f.write("This file is used to force setuptools to include the log directory. "
"You can delete it at any time after installation.")
else:
if not os.path.isdir(SCRIPTS_TEMP_PATH):
print("The flink core files are not found. Please make sure your installation package "
"is complete, or do this in the flink-python directory of the flink source "
"directory.")
sys.exit(-1)
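    # Snapshot (dev) builds pin the companion apache-flink-libraries package
    # exactly; release builds accept any version from this release up to (but
    # not including) the next patch version.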
if VERSION.find('dev0') != -1:
apache_flink_libraries_dependency = 'apache-flink-libraries==%s' % VERSION
else:
split_versions = VERSION.split('.')
split_versions[-1] = str(int(split_versions[-1]) + 1)
NEXT_VERSION = '.'.join(split_versions)
apache_flink_libraries_dependency = 'apache-flink-libraries>=%s,<%s' % \
(VERSION, NEXT_VERSION)
script_names = ["pyflink-shell.sh", "find-flink-home.sh"]
scripts = [os.path.join(SCRIPTS_TEMP_PATH, script) for script in script_names]
scripts.append("pyflink/find_flink_home.py")
PACKAGES = ['pyflink',
'pyflink.table',
'pyflink.util',
'pyflink.datastream',
'pyflink.common',
'pyflink.fn_execution',
'pyflink.fn_execution.beam',
'pyflink.fn_execution.datastream',
'pyflink.fn_execution.table',
'pyflink.fn_execution.utils',
'pyflink.metrics',
'pyflink.conf',
'pyflink.log',
'pyflink.examples',
'pyflink.bin']
PACKAGE_DIR = {
'pyflink.conf': TEMP_PATH + '/conf',
'pyflink.log': TEMP_PATH + '/log',
'pyflink.examples': TEMP_PATH + '/examples',
'pyflink.bin': TEMP_PATH + '/bin'}
PACKAGE_DATA = {
'pyflink': ['README.txt'],
'pyflink.conf': ['*'],
'pyflink.log': ['*'],
'pyflink.examples': ['*.py', '*/*.py'],
'pyflink.bin': ['*']}
setup(
name='apache-flink',
version=VERSION,
packages=PACKAGES,
include_package_data=True,
package_dir=PACKAGE_DIR,
package_data=PACKAGE_DATA,
scripts=scripts,
url='https://flink.apache.org',
license='https://www.apache.org/licenses/LICENSE-2.0',
author='Apache Software Foundation',
author_email='[email protected]',
python_requires='>=3.6',
install_requires=['py4j==0.10.8.1', 'python-dateutil==2.8.0', 'apache-beam==2.27.0',
'cloudpickle==1.2.2', 'avro-python3>=1.8.1,!=1.9.2,<1.10.0',
'pandas>=1.0,<1.2.0', 'pyarrow>=0.15.1,<3.0.0',
'pytz>=2018.3', 'numpy>=1.14.3,<1.20', 'fastavro>=0.21.4,<0.24',
apache_flink_libraries_dependency],
cmdclass={'build_ext': build_ext},
tests_require=['pytest==4.4.1'],
description='Apache Flink Python API',
long_description=long_description,
long_description_content_type='text/markdown',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'],
ext_modules=extensions
)
finally:
if in_flink_source:
remove_if_exists(TEMP_PATH)
remove_if_exists(LICENSE_FILE_TEMP_PATH)
remove_if_exists(README_FILE_TEMP_PATH)
| apache-2.0 |
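# A minimal, self-contained sketch of the apache-flink-libraries pinning logic in the
# setup.py above: dev builds pin the exact version, release builds accept anything up to
# (but excluding) the next patch release. The version strings below are hypothetical.
def libraries_dependency(version):
    # Dev builds ('dev0' in the version string) depend on the exact same version.
    if version.find('dev0') != -1:
        return 'apache-flink-libraries==%s' % version
    # Release builds: bump the last version component to form the exclusive upper bound.
    parts = version.split('.')
    parts[-1] = str(int(parts[-1]) + 1)
    return 'apache-flink-libraries>=%s,<%s' % (version, '.'.join(parts))

assert libraries_dependency('1.12.0') == 'apache-flink-libraries>=1.12.0,<1.12.1'
assert libraries_dependency('1.13.dev0') == 'apache-flink-libraries==1.13.dev0'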
ClimbsRocks/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 28 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([], dtype=int)  # keep indices integer for np.delete below
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
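# A minimal sketch of the uncertainty-selection step used in the example above:
# rows of a (made-up) label-distribution matrix are scored by entropy and the five
# most uncertain samples are chosen for labelling. Requires numpy and scipy only.
import numpy as np
from scipy import stats

label_distributions = np.array([
    [0.98, 0.01, 0.01],   # confident prediction
    [0.40, 0.35, 0.25],   # uncertain
    [0.34, 0.33, 0.33],   # close to uniform, i.e. most uncertain
    [0.90, 0.05, 0.05],
    [0.50, 0.30, 0.20],
    [0.80, 0.10, 0.10],
])
# entropy() works column-wise, so transpose to get one entropy value per sample
pred_entropies = stats.distributions.entropy(label_distributions.T)
uncertainty_index = np.argsort(pred_entropies)[-5:]
print(uncertainty_index)   # sample indices, ordered from less to more uncertain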
googleapis/python-bigquery | google/cloud/bigquery/magics/magics.py | 1 | 28344 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IPython Magics
To use these magics, you must first register them. Run the ``%load_ext`` magic
in a Jupyter notebook cell.
.. code::
%load_ext google.cloud.bigquery
This makes the ``%%bigquery`` magic available.
.. function:: %%bigquery
IPython cell magic to run a query and display the result as a DataFrame
.. code-block:: python
%%bigquery [<destination_var>] [--project <project>] [--use_legacy_sql]
[--verbose] [--params <params>]
<query>
Parameters:
* ``<destination_var>`` (Optional[line argument]):
variable to store the query results. The results are not displayed if
this parameter is used. If an error occurs during the query execution,
the corresponding ``QueryJob`` instance (if available) is stored in
the variable instead.
* ``--destination_table`` (Optional[line argument]):
A dataset and table to store the query results. If the table does not exist,
it will be created. If the table already exists, its data will be overwritten.
Variable should be in a format <dataset_id>.<table_id>.
* ``--project <project>`` (Optional[line argument]):
Project to use for running the query. Defaults to the context
:attr:`~google.cloud.bigquery.magics.Context.project`.
* ``--use_bqstorage_api`` (Optional[line argument]):
[Deprecated] Not used anymore, as BigQuery Storage API is used by default.
* ``--use_rest_api`` (Optional[line argument]):
Use the BigQuery REST API instead of the Storage API.
* ``--use_legacy_sql`` (Optional[line argument]):
Runs the query using Legacy SQL syntax. Defaults to Standard SQL if
this argument not used.
* ``--verbose`` (Optional[line argument]):
If this flag is used, information including the query job ID and the
amount of time for the query to complete will not be cleared after the
query is finished. By default, this information will be displayed but
will be cleared after the query is finished.
* ``--params <params>`` (Optional[line argument]):
If present, the argument following the ``--params`` flag must be
either:
* :class:`str` - A JSON string representation of a dictionary in the
format ``{"param_name": "param_value"}`` (ex. ``{"num": 17}``). Use
of the parameter in the query should be indicated with
``@param_name``. See ``In[5]`` in the Examples section below.
* :class:`dict` reference - A reference to a ``dict`` in the format
``{"param_name": "param_value"}``, where the value types must be JSON
serializable. The variable reference is indicated by a ``$`` before
the variable name (ex. ``$my_dict_var``). See ``In[6]`` and ``In[7]``
in the Examples section below.
* ``<query>`` (required, cell argument):
SQL query to run. If the query does not contain any whitespace (aside
from leading and trailing whitespace), it is assumed to represent a
fully-qualified table ID, and the latter's data will be fetched.
Returns:
A :class:`pandas.DataFrame` with the query results.
.. note::
All queries run using this magic will run using the context
:attr:`~google.cloud.bigquery.magics.Context.credentials`.
Examples:
The following examples can be run in an IPython notebook after loading
the bigquery IPython extension (see ``In[1]``) and setting up
Application Default Credentials.
.. code-block:: none
In [1]: %load_ext google.cloud.bigquery
In [2]: %%bigquery
...: SELECT name, SUM(number) as count
...: FROM `bigquery-public-data.usa_names.usa_1910_current`
...: GROUP BY name
...: ORDER BY count DESC
...: LIMIT 3
Out[2]: name count
...: -------------------
...: 0 James 4987296
...: 1 John 4866302
...: 2 Robert 4738204
In [3]: %%bigquery df --project my-alternate-project --verbose
...: SELECT name, SUM(number) as count
...: FROM `bigquery-public-data.usa_names.usa_1910_current`
...: WHERE gender = 'F'
...: GROUP BY name
...: ORDER BY count DESC
...: LIMIT 3
Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b
Query executing: 2.61s
Query complete after 2.92s
In [4]: df
Out[4]: name count
...: ----------------------
...: 0 Mary 3736239
...: 1 Patricia 1568495
...: 2 Elizabeth 1519946
In [5]: %%bigquery --params {"num": 17}
...: SELECT @num AS num
Out[5]: num
...: -------
...: 0 17
In [6]: params = {"num": 17}
In [7]: %%bigquery --params $params
...: SELECT @num AS num
Out[7]: num
...: -------
...: 0 17
"""
from __future__ import print_function
import re
import ast
import copy
import functools
import sys
import time
import warnings
from concurrent import futures
try:
import IPython
from IPython import display
from IPython.core import magic_arguments
except ImportError: # pragma: NO COVER
raise ImportError("This module can only be loaded in IPython.")
from google.api_core import client_info
from google.api_core import client_options
from google.api_core.exceptions import NotFound
import google.auth
from google.cloud import bigquery
import google.cloud.bigquery.dataset
from google.cloud.bigquery.dbapi import _helpers
from google.cloud.bigquery.magics import line_arg_parser as lap
IPYTHON_USER_AGENT = "ipython-{}".format(IPython.__version__)
class Context(object):
"""Storage for objects to be used throughout an IPython notebook session.
A Context object is initialized when the ``magics`` module is imported,
and can be found at ``google.cloud.bigquery.magics.context``.
"""
def __init__(self):
self._credentials = None
self._project = None
self._connection = None
self._default_query_job_config = bigquery.QueryJobConfig()
self._bigquery_client_options = client_options.ClientOptions()
self._bqstorage_client_options = client_options.ClientOptions()
self._progress_bar_type = "tqdm"
@property
def credentials(self):
"""google.auth.credentials.Credentials: Credentials to use for queries
performed through IPython magics.
Note:
These credentials do not need to be explicitly defined if you are
using Application Default Credentials. If you are not using
Application Default Credentials, manually construct a
:class:`google.auth.credentials.Credentials` object and set it as
the context credentials as demonstrated in the example below. See
`auth docs`_ for more information on obtaining credentials.
Example:
Manually setting the context credentials:
>>> from google.cloud.bigquery import magics
>>> from google.oauth2 import service_account
>>> credentials = (service_account
... .Credentials.from_service_account_file(
... '/path/to/key.json'))
>>> magics.context.credentials = credentials
.. _auth docs: http://google-auth.readthedocs.io
/en/latest/user-guide.html#obtaining-credentials
"""
if self._credentials is None:
self._credentials, _ = google.auth.default()
return self._credentials
@credentials.setter
def credentials(self, value):
self._credentials = value
@property
def project(self):
"""str: Default project to use for queries performed through IPython
magics.
Note:
The project does not need to be explicitly defined if you have an
environment default project set. If you do not have a default
project set in your environment, manually assign the project as
demonstrated in the example below.
Example:
Manually setting the context project:
>>> from google.cloud.bigquery import magics
>>> magics.context.project = 'my-project'
"""
if self._project is None:
_, self._project = google.auth.default()
return self._project
@project.setter
def project(self, value):
self._project = value
@property
def bigquery_client_options(self):
"""google.api_core.client_options.ClientOptions: client options to be
used through IPython magics.
Note::
The client options do not need to be explicitly defined if no
special network connections are required. Normally you would be
using the https://bigquery.googleapis.com/ end point.
Example:
Manually setting the endpoint:
>>> from google.cloud.bigquery import magics
>>> client_options = {}
>>> client_options['api_endpoint'] = "https://some.special.url"
>>> magics.context.bigquery_client_options = client_options
"""
return self._bigquery_client_options
@bigquery_client_options.setter
def bigquery_client_options(self, value):
self._bigquery_client_options = value
@property
def bqstorage_client_options(self):
"""google.api_core.client_options.ClientOptions: client options to be
used through IPython magics for the storage client.
Note::
The client options do not need to be explicitly defined if no
special network connections are required. Normally you would be
using the https://bigquerystorage.googleapis.com/ end point.
Example:
Manually setting the endpoint:
>>> from google.cloud.bigquery import magics
>>> client_options = {}
>>> client_options['api_endpoint'] = "https://some.special.url"
>>> magics.context.bqstorage_client_options = client_options
"""
return self._bqstorage_client_options
@bqstorage_client_options.setter
def bqstorage_client_options(self, value):
self._bqstorage_client_options = value
@property
def default_query_job_config(self):
"""google.cloud.bigquery.job.QueryJobConfig: Default job
configuration for queries.
The context's :class:`~google.cloud.bigquery.job.QueryJobConfig` is
used for queries. Some properties can be overridden with arguments to
the magics.
Example:
Manually setting the default value for ``maximum_bytes_billed``
to 100 MB:
>>> from google.cloud.bigquery import magics
>>> magics.context.default_query_job_config.maximum_bytes_billed = 100000000
"""
return self._default_query_job_config
@default_query_job_config.setter
def default_query_job_config(self, value):
self._default_query_job_config = value
@property
def progress_bar_type(self):
"""str: Default progress bar type to use to display progress bar while
executing queries through IPython magics.
Note::
Install the ``tqdm`` package to use this feature.
Example:
Manually setting the progress_bar_type:
>>> from google.cloud.bigquery import magics
>>> magics.context.progress_bar_type = "tqdm"
"""
return self._progress_bar_type
@progress_bar_type.setter
def progress_bar_type(self, value):
self._progress_bar_type = value
context = Context()
def _handle_error(error, destination_var=None):
"""Process a query execution error.
Args:
error (Exception):
An exception that occurred during the query execution.
destination_var (Optional[str]):
The name of the IPython session variable to store the query job.
"""
if destination_var:
query_job = getattr(error, "query_job", None)
if query_job is not None:
IPython.get_ipython().push({destination_var: query_job})
else:
# this is the case when previewing table rows by providing just
# table ID to cell magic
print(
"Could not save output to variable '{}'.".format(destination_var),
file=sys.stderr,
)
print("\nERROR:\n", str(error), file=sys.stderr)
def _run_query(client, query, job_config=None):
"""Runs a query while printing status updates
Args:
client (google.cloud.bigquery.client.Client):
Client to bundle configuration needed for API requests.
query (str):
SQL query to be executed. Defaults to the standard SQL dialect.
Use the ``job_config`` parameter to change dialects.
job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.QueryJob: the query job created
Example:
>>> client = bigquery.Client()
>>> _run_query(client, "SELECT 17")
Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b
Query executing: 1.66s
Query complete after 2.07s
'bf633912-af2c-4780-b568-5d868058632b'
"""
start_time = time.time()
query_job = client.query(query, job_config=job_config)
if job_config and job_config.dry_run:
return query_job
print("Executing query with job ID: {}".format(query_job.job_id))
while True:
print("\rQuery executing: {:0.2f}s".format(time.time() - start_time), end="")
try:
query_job.result(timeout=0.5)
break
except futures.TimeoutError:
continue
print("\nQuery complete after {:0.2f}s".format(time.time() - start_time))
return query_job
def _create_dataset_if_necessary(client, dataset_id):
"""Create a dataset in the current project if it doesn't exist.
Args:
client (google.cloud.bigquery.client.Client):
Client to bundle configuration needed for API requests.
dataset_id (str):
Dataset id.
"""
dataset_reference = bigquery.dataset.DatasetReference(client.project, dataset_id)
try:
dataset = client.get_dataset(dataset_reference)
return
except NotFound:
pass
dataset = bigquery.Dataset(dataset_reference)
dataset.location = client.location
print("Creating dataset: {}".format(dataset_id))
dataset = client.create_dataset(dataset)
@magic_arguments.magic_arguments()
@magic_arguments.argument(
"destination_var",
nargs="?",
help=("If provided, save the output to this variable instead of displaying it."),
)
@magic_arguments.argument(
"--destination_table",
type=str,
default=None,
help=(
"If provided, save the output of the query to a new BigQuery table. "
"Variable should be in a format <dataset_id>.<table_id>. "
"If table does not exists, it will be created. "
"If table already exists, its data will be overwritten."
),
)
@magic_arguments.argument(
"--project",
type=str,
default=None,
help=("Project to use for executing this query. Defaults to the context project."),
)
@magic_arguments.argument(
"--max_results",
default=None,
help=(
"Maximum number of rows in dataframe returned from executing the query."
"Defaults to returning all rows."
),
)
@magic_arguments.argument(
"--maximum_bytes_billed",
default=None,
help=(
"maximum_bytes_billed to use for executing this query. Defaults to "
"the context default_query_job_config.maximum_bytes_billed."
),
)
@magic_arguments.argument(
"--dry_run",
action="store_true",
default=False,
help=(
"Sets query to be a dry run to estimate costs. "
"Defaults to executing the query instead of dry run if this argument is not used."
),
)
@magic_arguments.argument(
"--use_legacy_sql",
action="store_true",
default=False,
help=(
"Sets query to use Legacy SQL instead of Standard SQL. Defaults to "
"Standard SQL if this argument is not used."
),
)
@magic_arguments.argument(
"--bigquery_api_endpoint",
type=str,
default=None,
help=(
"The desired API endpoint, e.g., bigquery.googlepis.com. Defaults to this "
"option's value in the context bigquery_client_options."
),
)
@magic_arguments.argument(
"--bqstorage_api_endpoint",
type=str,
default=None,
help=(
"The desired API endpoint, e.g., bigquerystorage.googlepis.com. Defaults to "
"this option's value in the context bqstorage_client_options."
),
)
@magic_arguments.argument(
"--use_bqstorage_api",
action="store_true",
default=None,
help=(
"[Deprecated] The BigQuery Storage API is already used by default to "
"download large query results, and this option has no effect. "
"If you want to switch to the classic REST API instead, use the "
"--use_rest_api option."
),
)
@magic_arguments.argument(
"--use_rest_api",
action="store_true",
default=False,
help=(
"Use the classic REST API instead of the BigQuery Storage API to "
"download query results."
),
)
@magic_arguments.argument(
"--verbose",
action="store_true",
default=False,
help=(
"If set, print verbose output, including the query job ID and the "
"amount of time for the query to finish. By default, this "
"information will be displayed as the query runs, but will be "
"cleared after the query is finished."
),
)
@magic_arguments.argument(
"--params",
nargs="+",
default=None,
help=(
"Parameters to format the query string. If present, the --params "
"flag should be followed by a string representation of a dictionary "
"in the format {'param_name': 'param_value'} (ex. {\"num\": 17}), "
"or a reference to a dictionary in the same format. The dictionary "
"reference can be made by including a '$' before the variable "
"name (ex. $my_dict_var)."
),
)
@magic_arguments.argument(
"--progress_bar_type",
type=str,
default=None,
help=(
"Sets progress bar type to display a progress bar while executing the query."
"Defaults to use tqdm. Install the ``tqdm`` package to use this feature."
),
)
def _cell_magic(line, query):
"""Underlying function for bigquery cell magic
Note:
This function contains the underlying logic for the 'bigquery' cell
magic. This function is not meant to be called directly.
Args:
line (str): "%%bigquery" followed by arguments as required
query (str): SQL query to run
Returns:
pandas.DataFrame: the query results.
"""
# The built-in parser does not recognize Python structures such as dicts, thus
# we extract the "--params" option and inteprpret it separately.
try:
params_option_value, rest_of_args = _split_args_line(line)
except lap.exceptions.QueryParamsParseError as exc:
rebranded_error = SyntaxError(
"--params is not a correctly formatted JSON string or a JSON "
"serializable dictionary"
)
raise rebranded_error from exc
except lap.exceptions.DuplicateQueryParamsError as exc:
rebranded_error = ValueError("Duplicate --params option.")
raise rebranded_error from exc
except lap.exceptions.ParseError as exc:
rebranded_error = ValueError(
"Unrecognized input, are option values correct? "
"Error details: {}".format(exc.args[0])
)
raise rebranded_error from exc
args = magic_arguments.parse_argstring(_cell_magic, rest_of_args)
if args.use_bqstorage_api is not None:
warnings.warn(
"Deprecated option --use_bqstorage_api, the BigQuery "
"Storage API is already used by default.",
category=DeprecationWarning,
)
use_bqstorage_api = not args.use_rest_api
params = []
if params_option_value:
# A non-existing params variable is not expanded and ends up in the input
# in its raw form, e.g. "$query_params".
if params_option_value.startswith("$"):
msg = 'Parameter expansion failed, undefined variable "{}".'.format(
params_option_value[1:]
)
raise NameError(msg)
params = _helpers.to_query_parameters(ast.literal_eval(params_option_value), {})
project = args.project or context.project
bigquery_client_options = copy.deepcopy(context.bigquery_client_options)
if args.bigquery_api_endpoint:
if isinstance(bigquery_client_options, dict):
bigquery_client_options["api_endpoint"] = args.bigquery_api_endpoint
else:
bigquery_client_options.api_endpoint = args.bigquery_api_endpoint
client = bigquery.Client(
project=project,
credentials=context.credentials,
default_query_job_config=context.default_query_job_config,
client_info=client_info.ClientInfo(user_agent=IPYTHON_USER_AGENT),
client_options=bigquery_client_options,
)
if context._connection:
client._connection = context._connection
bqstorage_client_options = copy.deepcopy(context.bqstorage_client_options)
if args.bqstorage_api_endpoint:
if isinstance(bqstorage_client_options, dict):
bqstorage_client_options["api_endpoint"] = args.bqstorage_api_endpoint
else:
bqstorage_client_options.api_endpoint = args.bqstorage_api_endpoint
bqstorage_client = _make_bqstorage_client(
client, use_bqstorage_api, bqstorage_client_options,
)
close_transports = functools.partial(_close_transports, client, bqstorage_client)
try:
if args.max_results:
max_results = int(args.max_results)
else:
max_results = None
query = query.strip()
if not query:
error = ValueError("Query is missing.")
_handle_error(error, args.destination_var)
return
# Any query that does not contain whitespace (aside from leading and trailing whitespace)
# is assumed to be a table id
if not re.search(r"\s", query):
try:
rows = client.list_rows(query, max_results=max_results)
except Exception as ex:
_handle_error(ex, args.destination_var)
return
result = rows.to_dataframe(bqstorage_client=bqstorage_client)
if args.destination_var:
IPython.get_ipython().push({args.destination_var: result})
return
else:
return result
job_config = bigquery.job.QueryJobConfig()
job_config.query_parameters = params
job_config.use_legacy_sql = args.use_legacy_sql
job_config.dry_run = args.dry_run
if args.destination_table:
split = args.destination_table.split(".")
if len(split) != 2:
raise ValueError(
"--destination_table should be in a <dataset_id>.<table_id> format."
)
dataset_id, table_id = split
job_config.allow_large_results = True
dataset_ref = bigquery.dataset.DatasetReference(client.project, dataset_id)
destination_table_ref = dataset_ref.table(table_id)
job_config.destination = destination_table_ref
job_config.create_disposition = "CREATE_IF_NEEDED"
job_config.write_disposition = "WRITE_TRUNCATE"
_create_dataset_if_necessary(client, dataset_id)
if args.maximum_bytes_billed == "None":
job_config.maximum_bytes_billed = 0
elif args.maximum_bytes_billed is not None:
value = int(args.maximum_bytes_billed)
job_config.maximum_bytes_billed = value
try:
query_job = _run_query(client, query, job_config=job_config)
except Exception as ex:
_handle_error(ex, args.destination_var)
return
if not args.verbose:
display.clear_output()
if args.dry_run and args.destination_var:
IPython.get_ipython().push({args.destination_var: query_job})
return
elif args.dry_run:
print(
"Query validated. This query will process {} bytes.".format(
query_job.total_bytes_processed
)
)
return query_job
progress_bar = context.progress_bar_type or args.progress_bar_type
if max_results:
result = query_job.result(max_results=max_results).to_dataframe(
bqstorage_client=bqstorage_client, progress_bar_type=progress_bar
)
else:
result = query_job.to_dataframe(
bqstorage_client=bqstorage_client, progress_bar_type=progress_bar
)
if args.destination_var:
IPython.get_ipython().push({args.destination_var: result})
else:
return result
finally:
close_transports()
def _split_args_line(line):
"""Split out the --params option value from the input line arguments.
Args:
line (str): The line arguments passed to the cell magic.
Returns:
Tuple[str, str]
"""
lexer = lap.Lexer(line)
scanner = lap.Parser(lexer)
tree = scanner.input_line()
extractor = lap.QueryParamsExtractor()
params_option_value, rest_of_args = extractor.visit(tree)
return params_option_value, rest_of_args
def _make_bqstorage_client(client, use_bqstorage_api, client_options):
if not use_bqstorage_api:
return None
try:
from google.cloud import bigquery_storage # noqa: F401
except ImportError as err:
customized_error = ImportError(
"The default BigQuery Storage API client cannot be used, install "
"the missing google-cloud-bigquery-storage and pyarrow packages "
"to use it. Alternatively, use the classic REST API by specifying "
"the --use_rest_api magic option."
)
raise customized_error from err
try:
from google.api_core.gapic_v1 import client_info as gapic_client_info
except ImportError as err:
customized_error = ImportError(
"Install the grpcio package to use the BigQuery Storage API."
)
raise customized_error from err
return client._ensure_bqstorage_client(
client_options=client_options,
client_info=gapic_client_info.ClientInfo(user_agent=IPYTHON_USER_AGENT),
)
def _close_transports(client, bqstorage_client):
"""Close the given clients' underlying transport channels.
Closing the transport is needed to release system resources, namely open
sockets.
Args:
client (:class:`~google.cloud.bigquery.client.Client`):
bqstorage_client
(Optional[:class:`~google.cloud.bigquery_storage.BigQueryReadClient`]):
A client for the BigQuery Storage API.
"""
client.close()
if bqstorage_client is not None:
bqstorage_client._transport.grpc_channel.close()
| apache-2.0 |
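# A minimal sketch of configuring the magics context defined above before using the
# %%bigquery cell magic in a notebook. Assumes google-cloud-bigquery is installed;
# the project id and byte limit are hypothetical values.
from google.cloud.bigquery import magics

magics.context.project = "my-example-project"       # default project for queries
magics.context.progress_bar_type = "tqdm"           # progress bar while queries run
magics.context.default_query_job_config.maximum_bytes_billed = 100000000

# In a notebook one would then run, for example:
#   %load_ext google.cloud.bigquery
#   %%bigquery results_df
#   SELECT 17 AS num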
jgors/duecredit | duecredit/tests/test_injections.py | 1 | 8520 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the duecredit package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import gc
import sys
from six import viewvalues, PY2
if PY2:
import __builtin__
else:
import builtins as __builtin__
_orig__import__ = __builtin__.__import__
from duecredit.collector import DueCreditCollector, InactiveDueCreditCollector
from duecredit.entries import BibTeX, Doi
from ..injections.injector import DueCreditInjector, find_object, get_modules_for_injection
from .. import __version__
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
try:
import mvpa2
_have_mvpa2 = True
except ImportError:
_have_mvpa2 = False
class TestActiveInjector(object):
def setup(self):
self._cleanup_modules()
self.due = DueCreditCollector()
self.injector = DueCreditInjector(collector=self.due)
self.injector.activate(retrospect=False) # numpy might be already loaded...
def teardown(self):
# gc might not pick up inj after some tests complete
# so we will always deactivate explicitly
self.injector.deactivate()
assert_true(__builtin__.__import__ is _orig__import__)
self._cleanup_modules()
def _cleanup_modules(self):
if 'duecredit.tests.mod' in sys.modules:
sys.modules.pop('duecredit.tests.mod')
def _test_simple_injection(self, func, import_stmt, func_call=None):
assert_false('duecredit.tests.mod' in sys.modules)
self.injector.add('duecredit.tests.mod', func,
Doi('1.2.3.4'),
description="Testing %s" % func,
min_version='0.1', max_version='1.0',
tags=["implementation", "very custom"])
assert_false('duecredit.tests.mod' in sys.modules) # no import happening
assert_equal(len(self.due._entries), 0)
assert_equal(len(self.due.citations), 0)
exec(import_stmt)
assert_equal(len(self.due._entries), 1) # we should get an entry now
assert_equal(len(self.due.citations), 0) # but not yet a citation
import duecredit.tests.mod as mod
_, _, obj = find_object(mod, func)
assert_true(obj.__duecredited__) # we wrapped
assert_false(obj.__duecredited__ is obj) # and it is not pointing to the same func
assert_equal(obj.__doc__, "custom docstring") # we preserved docstring
# TODO: test decoration features -- preserver __doc__ etc
exec('ret = %s(None, "somevalue")' % (func_call or func))
# XXX: awkwardly 'ret' is not found in the scope while running nosetests
# under python3.4, although present in locals()... WTF?
assert_equal(locals()['ret'], "%s: None, somevalue" % func)
assert_equal(len(self.due._entries), 1)
assert_equal(len(self.due.citations), 1)
# TODO: there must be a cleaner way to get first value
citation = list(viewvalues(self.due.citations))[0]
# TODO: ATM we don't allow versioning of the submodules -- we should
# assert_equal(citation.version, '0.5')
# ATM it will be the duecredit's version
assert_equal(citation.version, __version__)
assert(citation.tags == ['implementation', 'very custom'])
def test_simple_injection(self):
yield self._test_simple_injection, "testfunc1", 'from duecredit.tests.mod import testfunc1'
yield self._test_simple_injection, "TestClass1.testmeth1", \
'from duecredit.tests.mod import TestClass1; c = TestClass1()', 'c.testmeth1'
yield self._test_simple_injection, "TestClass12.Embed.testmeth1", \
'from duecredit.tests.mod import TestClass12; c = TestClass12.Embed()', 'c.testmeth1'
def test_delayed_entries(self):
# verify that addition of delayed injections happened
modules_for_injection = get_modules_for_injection()
assert_equal(len(self.injector._delayed_injections), len(modules_for_injection))
assert_equal(self.injector._entry_records, {}) # but no entries were added
assert('scipy' in self.injector._delayed_injections) # We must have it ATM
try:
# We do have injections for scipy
import scipy
except ImportError as e:
raise SkipTest("scipy was not found: %s" % (e,))
def test_import_mvpa2_suite(self):
if not _have_mvpa2:
raise SkipTest("no mvpa2 found")
# just a smoke test for now
import mvpa2.suite as mv
def _test_incorrect_path(self, mod, obj):
ref = Doi('1.2.3.4')
# none of them should lead to a failure
self.injector.add(mod, obj, ref)
# now cause the import handling -- it must not fail
# TODO: catch/analyze warnings
exec('from duecredit.tests.mod import testfunc1')
def test_incorrect_path(self):
yield self._test_incorrect_path, "nonexistingmodule", None
yield self._test_incorrect_path, "duecredit.tests.mod.nonexistingmodule", None
yield self._test_incorrect_path, "duecredit.tests.mod", "nonexisting"
yield self._test_incorrect_path, "duecredit.tests.mod", "nonexisting.whocares"
def _test_find_object(mod, path, parent, obj_name, obj):
assert_equal(find_object(mod, path), (parent, obj_name, obj))
def test_find_object():
import duecredit.tests.mod as mod
yield _test_find_object, mod, 'testfunc1', mod, 'testfunc1', mod.testfunc1
yield _test_find_object, mod, 'TestClass1', mod, 'TestClass1', mod.TestClass1
yield _test_find_object, mod, 'TestClass1.testmeth1', mod.TestClass1, 'testmeth1', mod.TestClass1.testmeth1
yield _test_find_object, mod, 'TestClass12.Embed.testmeth1', \
mod.TestClass12.Embed, 'testmeth1', mod.TestClass12.Embed.testmeth1
def test_no_double_activation():
orig__import__ = __builtin__.__import__
try:
due = DueCreditCollector()
injector = DueCreditInjector(collector=due)
injector.activate()
assert_false(__builtin__.__import__ is orig__import__)
duecredited__import__ = __builtin__.__import__
# TODO: catch/analyze/swallow warning
injector.activate()
assert_true(__builtin__.__import__ is duecredited__import__) # we didn't decorate again
finally:
injector.deactivate()
__builtin__.__import__ = orig__import__
def test_get_modules_for_injection():
assert_equal(get_modules_for_injection(), [
'mod_biosig',
'mod_dipy',
'mod_mdp',
'mod_mne',
'mod_nibabel',
'mod_nipy',
'mod_nipype',
'mod_numpy',
'mod_pandas',
'mod_psychopy',
'mod_scipy',
'mod_skimage',
'mod_sklearn'])
def test_cover_our_injections():
# this one tests only import/syntax/api for the injections
due = DueCreditCollector()
inj = DueCreditInjector(collector=due)
for modname in get_modules_for_injection():
mod = __import__('duecredit.injections.' + modname, fromlist=["duecredit.injections"])
mod.inject(inj)
def test_no_harm_from_deactivate():
# if we have not activated one -- shouldn't blow if we deactivate it
# TODO: catch warning being spitted out
DueCreditInjector().deactivate()
def test_injector_del():
orig__import__ = __builtin__.__import__
try:
due = DueCreditCollector()
inj = DueCreditInjector(collector=due)
del inj # delete inactive
assert_true(__builtin__.__import__ is orig__import__)
inj = DueCreditInjector(collector=due)
inj.activate(retrospect=False)
assert_false(__builtin__.__import__ is orig__import__)
assert_false(inj._orig_import is None)
del inj # delete active but not used
inj = None
__builtin__.__import__ = None # We need to do that since otherwise gc will not pick up inj
gc.collect() # To cause __del__
assert_true(__builtin__.__import__ is orig__import__)
import abc # and new imports work just fine
finally:
__builtin__.__import__ = orig__import__
| bsd-2-clause |
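# A minimal sketch of the injection API exercised by the tests above: register a
# reference for a function in a module, activate the import hook, and the citation is
# recorded when the function is called. Assumes duecredit is installed; the DOI below
# is a hypothetical placeholder.
from duecredit.collector import DueCreditCollector
from duecredit.entries import Doi
from duecredit.injections.injector import DueCreditInjector

due = DueCreditCollector()
injector = DueCreditInjector(collector=due)
injector.add('duecredit.tests.mod', 'testfunc1', Doi('10.1000/placeholder'),
             description='Testing testfunc1', tags=['implementation'])
injector.activate(retrospect=False)
try:
    from duecredit.tests.mod import testfunc1
    testfunc1(None, 'somevalue')      # triggers the delayed citation
    print(len(due.citations))         # expected to be 1
finally:
    injector.deactivate()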
apur27/public | ASX-Python/LoadTrainPredict-LSTM-SLR.py | 1 | 3202 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 09:40:59 2019
@author: UPuroAb
"""
import glob
#import os
import pandas as pd
colnames=['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']
all_files = glob.glob('C:/QM/rnd/SLR/*.csv') # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (pd.read_csv(f, names=colnames, header=None, encoding='utf-8') for f in all_files)
data = pd.concat(df_from_each_file, ignore_index=True, sort=True)
import numpy as np
import matplotlib.pyplot as plt
# preprocessing, scaling and LSTM model imports
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense,Dropout, LSTM
asxTicker='Close'
ticker=data
ticker=ticker.reset_index()
new_data = pd.DataFrame(index=range(0,len(ticker)),columns=['Date', 'Close'])
for i in range(0,len(ticker)):
new_data['Date'][i] = ticker['Date'][i]
new_data['Close'][i] = ticker[asxTicker][i]
trainSize=1000
#new_data['Date'] = pd.to_datetime(new_data['Date'],format='%Y-%m-%d')
new_data.index = new_data.Date
new_data.drop('Date', axis=1, inplace=True)
#creating train and test sets
dataset = new_data.values
train = dataset[0:trainSize,:]
valid = dataset[trainSize:,:]
#converting dataset into x_train and y_train
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
x_train, y_train = [], []
for i in range(60,len(train)):
x_train.append(scaled_data[i-60:i,0])
y_train.append(scaled_data[i,0])
x_train, y_train = np.array(x_train), np.array(y_train)
x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1],1)))
model.add(Dropout(0.4))
#model.add(LSTM(units=50))
#
## added
#
#model.add(Dropout(0.3))
model.add(LSTM(units = 100, return_sequences = True))
model.add(Dropout(0.3))
model.add(LSTM(units = 100, return_sequences = True))
model.add(Dropout(0.2))
#
#model.add(LSTM(units = 50, return_sequences = True))
#model.add(Dropout(0.2))
#
#model.add(LSTM(units = 50, return_sequences = True))
#model.add(Dropout(0.2))
model.add(LSTM(units = 50))
model.add(Dropout(0.2))
# added
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, epochs=30, batch_size=10, verbose=2)
#predicting 246 values, using past 60 from the train data
inputs = new_data[len(new_data) - len(valid) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = scaler.transform(inputs)
X_test = []
for i in range(60,inputs.shape[0]):
X_test.append(inputs[i-60:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))
closing_price = model.predict(X_test)
closing_price = scaler.inverse_transform(closing_price)
rmsL=np.sqrt(np.mean(np.power((valid-closing_price),2)))
#for plotting
train = new_data[:trainSize]
valid = new_data[trainSize:]
valid['Predictions'] = closing_price
plt.plot(train['Close'])
plt.plot(valid[['Close','Predictions']])
| artistic-2.0 |
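# A minimal sketch of the 60-step sliding-window construction used above, shown on a
# small synthetic series so the resulting shapes are easy to inspect.
import numpy as np

series = np.arange(100, dtype=float).reshape(-1, 1)   # stand-in for the scaled prices
window = 60
X, y = [], []
for i in range(window, len(series)):
    X.append(series[i - window:i, 0])   # previous 60 values as the input sequence
    y.append(series[i, 0])              # next value as the prediction target
X, y = np.array(X), np.array(y)
X = np.reshape(X, (X.shape[0], X.shape[1], 1))   # (samples, timesteps, features) for the LSTM
print(X.shape, y.shape)                          # (40, 60, 1) (40,)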
Morisset/PyNeb_devel | pyneb/sample_scripts/Choroni_School/ex7_3.py | 1 | 2278 | # Analysis of a simple two-component model, meant to illustrate the bias arising from assuming
# that the region is homogeneous in density
# First, an emission region made up of two different subregions is modelled,
# each with a different mass and density. The resulting overall emissivity is computed
# Second, the region is analyzed as if it were a homogeneous region
import pyneb as pn
import matplotlib.pyplot as plt
from pyneb.utils.misc import parseAtom
def plot_2comp(tem1=1e4, tem2=1e4, dens1=3e2, dens2=5e5, mass1=1, mass2=5e-4):
# List of diagnostics used to analyze the region
diags = pn.Diagnostics()
for diag in pn.diags_dict:
if diag[0:7] != '[FeIII]':
diags.addDiag(diag)
diags.addClabel('[SIII] 6312/9069', '[SIII]A')
diags.addClabel('[OIII] 4363/5007', '[OIII]A')
# Define all the ions that are involved in the diagnostics
all_atoms = diags.atomDict
pn.log_.message('Atoms built')
obs = pn.Observation(corrected = True)
for atom in all_atoms:
# Computes all the intensities of all the lines of all the ions considered
for wavelength in all_atoms[atom].lineList:
elem, spec = parseAtom(atom)
intens1 = all_atoms[atom].getEmissivity(tem1, dens1, wave = wavelength) * dens1 * mass1
intens2 = all_atoms[atom].getEmissivity(tem2, dens2, wave = wavelength) * dens2 * mass2
obs.addLine(pn.EmissionLine(elem, spec, wavelength,
obsIntens=[intens1, intens2, intens1+intens2],
obsError=[0.0, 0.0, 0.0]))
pn.log_.message('Virtual observations computed')
emisgrids = pn.getEmisGridDict(atomDict = all_atoms)
pn.log_.message('EmisGrids available')
# Produce a diagnostic plot for each of the two regions and another one for the (misanalyzed) overall region
plt.subplot(2,2,1)
diags.plot(emisgrids, obs, i_obs=0)
plt.subplot(2,2,2)
diags.plot(emisgrids, obs, i_obs=1)
plt.subplot(2,1,2)
pn.log_.level=3
diags.plot(emisgrids, obs, i_obs=2)
if __name__ == '__main__':
plot_2comp(tem1=1e4, tem2=1e4, dens1=3e2, dens2=5e5, mass1=1, mass2=5e-4)
plt.show()
| gpl-3.0 |
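# A minimal numeric sketch of the two-component weighting used above: each sub-region
# contributes emissivity(T, n) * n * mass to a line, and the blended sum is what gets
# analysed as if it came from a single homogeneous region. The emissivity values are
# made-up numbers, not PyNeb output.
j1, j2 = 1.0e-21, 4.0e-22      # hypothetical line emissivities
dens1, dens2 = 3e2, 5e5        # cm^-3, the defaults of plot_2comp
mass1, mass2 = 1.0, 5e-4       # relative masses of the two sub-regions

intens1 = j1 * dens1 * mass1
intens2 = j2 * dens2 * mass2
print(intens1, intens2, intens1 + intens2)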
rubikloud/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
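# A minimal sketch of the three regularization knobs compared above, applied to a single
# GradientBoostingClassifier instead of the full comparison grid. Assumes scikit-learn
# is installed; the parameter values are illustrative.
from sklearn import datasets, ensemble

X, y = datasets.make_hastie_10_2(n_samples=2000, random_state=1)
clf = ensemble.GradientBoostingClassifier(
    n_estimators=200,
    learning_rate=0.1,   # shrinkage
    subsample=0.5,       # stochastic gradient boosting
    max_features=2,      # random feature subsampling per split
    random_state=2,
)
clf.fit(X, y)
print(clf.score(X, y))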
jepegit/cellpy | cellpy/parameters/prms.py | 1 | 6381 | """cellpy parameters"""
import os
from pathlib import Path
import sys
import box
# class Parameter(object):
# """class for storing parameters"""
# def __init__(self, name, prm_dict):
# self.name = name
# for key in prm_dict:
# setattr(self, key, prm_dict[key])
#
# def __repr__(self):
# return "<cellpy_prms: %s>" % self.__dict__
# locations etc for reading custom parameters
script_dir = os.path.abspath(os.path.dirname(__file__))
cur_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
user_dir = os.path.expanduser("~")
# search_path = dict()
# search_path["curdir"] = cur_dir
# search_path["filedir"] = script_dir
# search_path["userdir"] = user_dir
#
# search_order = ["curdir", "filedir", "userdir"]
# default_name = "_cellpy_prms_default.ini"
# prm_default = os.path.join(script_dir, default_name)
# prm_filename = prm_default
# --------------------------
# Paths
# --------------------------
Paths = {
"outdatadir": cur_dir,
"rawdatadir": cur_dir,
"cellpydatadir": cur_dir,
"db_path": cur_dir,
"filelogdir": cur_dir,
"examplesdir": cur_dir,
"notebookdir": cur_dir,
"batchfiledir": cur_dir,
"db_filename": "cellpy_db.xlsx",
}
Paths = box.Box(Paths)
# --------------------------
# FileNames
# --------------------------
FileNames = {"file_name_format": "YYYYMMDD_[NAME]EEE_CC_TT_RR"}
FileNames = box.Box(FileNames)
# --------------------------
# Reader
# --------------------------
Reader = {
"diagnostics": False,
"filestatuschecker": "size",
"force_step_table_creation": True,
"force_all": False, # not used yet - should be used when saving
"sep": ";",
"cycle_mode": "anode", # used in cellreader (593)
"sorted_data": True, # finding step-types assumes sorted data
"load_only_summary": False,
"select_minimal": False,
"limit_loaded_cycles": None,
"ensure_step_table": False,
"daniel_number": 5,
"voltage_interpolation_step": 0.01,
"time_interpolation_step": 10.0,
"capacity_interpolation_step": 2.0,
"use_cellpy_stat_file": False,
"raw_datadir": None,
"cellpy_datadir": None,
"auto_dirs": True, # search in prm-file for res and hdf5 dirs in loadcell
}
Reader = box.Box(Reader)
# --------------------------
# DataSet
# --------------------------
DataSet = {
"nom_cap": 3579
} # mAh/g (used for finding c-rates) [should be moved to Materials]
DataSet = box.Box(DataSet)
# --------------------------
# Db
# --------------------------
Db = {
"db_type": "simple_excel_reader",
"db_table_name": "db_table",
"db_header_row": 0,
"db_unit_row": 1,
"db_data_start_row": 2,
"db_search_start_row": 2,
"db_search_end_row": -1,
}
Db = box.Box(Db)
# -----------------------------
# New Excel Reader
# attribute = (header, dtype)
# -----------------------------
DbCols = {
"id": ("id", "int"),
"exists": ("exists", "bol"),
"batch": ("batch", "str"),
"sub_batch_01": ("b01", "str"),
"sub_batch_02": ("b02", "str"),
"sub_batch_03": ("b03", "str"),
"sub_batch_04": ("b04", "str"),
"sub_batch_05": ("b05", "str"),
"sub_batch_06": ("b06", "str"),
"sub_batch_07": ("b07", "str"),
"project": ("project", "str"),
"label": ("label", "str"),
"group": ("group", "int"),
"selected": ("selected", "bol"),
"cell_name": ("cell", "str"),
"cell_type": ("cell_type", "cat"),
"experiment_type": ("experiment_type", "cat"),
"active_material": ("mass_active_material", "float"),
"total_material": ("mass_total", "float"),
"loading": ("loading_active_material", "float"),
"nom_cap": ("nominal_capacity", "float"),
"file_name_indicator": ("file_name_indicator", "str"),
"instrument": ("instrument", "str"),
"raw_file_names": ("raw_file_names", "list"),
"cellpy_file_name": ("cellpy_file_name", "str"),
"comment_slurry": ("comment_slurry", "str"),
"comment_cell": ("comment_cell", "str"),
"comment_general": ("comment_general", "str"),
"freeze": ("freeze", "bol"),
}
DbCols = box.Box(DbCols)
# --------------------------
# Instruments
# --------------------------
Instruments = {"tester": "arbin", "custom_instrument_definitions_file": None}
Instruments = box.Box(Instruments)
# Pre-defined instruments:
Arbin = {
"max_res_filesize": 150_000_000,
"chunk_size": None,
"max_chunks": None,
"use_subprocess": False,
"detect_subprocess_need": False,
"sub_process_path": None,
"office_version": "64bit",
"SQL_server": r"localhost\SQLEXPRESS",
}
# Register pre-defined instruments:
Instruments["Arbin"] = Arbin
# --------------------------
# Materials
# --------------------------
Materials = {"cell_class": "Li-Ion", "default_material": "silicon", "default_mass": 1.0}
Materials = box.Box(Materials)
# --------------------------
# Batch-options
# --------------------------
Batch = {
"template": "standard",
"fig_extension": "png",
"backend": "bokeh",
"notebook": True,
"dpi": 300,
"markersize": 4,
"symbol_label": "simple",
"color_style_label": "seaborn-deep",
"figure_type": "unlimited",
"summary_plot_width": 900,
"summary_plot_height": 800,
"summary_plot_height_fractions": [0.2, 0.5, 0.3],
}
Batch = box.Box(Batch)
# --------------------------
# Other non-config
# --------------------------
_variable_that_is_not_saved_to_config = "Hei"
_prm_default_name = ".cellpy_prms_default.conf"
_prm_globtxt = ".cellpy_prms*.conf"
_odbcs = ["pyodbc", "ado", "pypyodbc"]
_odbc = "pyodbc"
_search_for_odbc_driver = True
_allow_multi_test_file = False
_use_filename_cache = True
_sub_process_path = Path(__file__) / "../../../bin/mdbtools-win/mdb-export"
_sub_process_path = _sub_process_path.resolve()
_sort_if_subprocess = True
_cellpyfile_root = "CellpyData"
_cellpyfile_raw = "/raw"
_cellpyfile_step = "/steps"
_cellpyfile_summary = "/summary"
_cellpyfile_fid = "/fid"
_cellpyfile_complevel = 1
_cellpyfile_complib = None # currently defaults to "zlib"
_cellpyfile_raw_format = "table"
_cellpyfile_summary_format = "table"
_cellpyfile_stepdata_format = "table"
_cellpyfile_infotable_format = "fixed"
_cellpyfile_fidtable_format = "fixed"
# used as global variables
_globals_status = ""
_globals_errors = []
_globals_message = []
# used during development for testing new features
_res_chunk = 0
| mit |
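# A minimal sketch of how the box.Box configuration objects above are read and
# overridden elsewhere in the code base: keys become attributes, and plain dict-style
# access still works. Assumes cellpy is installed.
from cellpy.parameters import prms

print(prms.Reader.cycle_mode)        # attribute access -> "anode"
print(prms.Paths["outdatadir"])      # dict-style access still available
prms.Instruments.tester = "arbin"    # values can be overridden at run time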
manashmndl/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
except LinAlgError:
# a failed SVD counts as one restart, so the outer loop gives up after max_svd_restarts
svd_restarts += 1
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
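# Usage sketch: given an (n_samples, n_clusters) spectral embedding `maps`,
#     labels = discretize(maps, random_state=0)
# returns one integer cluster label per sample; spectral_clustering below uses this
# when assign_labels='discretize'.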
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
The graph should contain only one connect component, elsewhere
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance, when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
    ----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
        Scaling factor of the RBF, polynomial, exponential chi^2 and
        sigmoid affinity kernels. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
        of the lobpcg eigenvector decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if ``affinity=='precomputed'``, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
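# --- Illustrative usage sketch (added for exposition; not part of the original
# scikit-learn module). It exercises the two affinity modes described in the
# class docstring: the default RBF kernel on raw features, and a precomputed
# similarity matrix obtained by passing a distance matrix through the Gaussian
# (heat) kernel, as suggested in the Notes section. The toy data, the helper
# name and the ``delta`` bandwidth are made-up assumptions for demonstration.
def _spectral_clustering_estimator_example():
    X = np.array([[0.0, 0.0], [0.2, 0.1], [0.1, 0.2],
                  [4.0, 4.0], [4.2, 4.1], [4.1, 4.2]])
    # 1) Default mode: the affinity matrix is built from X with the RBF kernel.
    rbf_labels = SpectralClustering(n_clusters=2, affinity='rbf', gamma=1.0,
                                    random_state=0).fit(X).labels_
    # 2) Precomputed mode: turn a distance matrix into a similarity matrix
    #    first, then pass it directly to ``fit``.
    distances = np.linalg.norm(X[:, None, :] - X[None, :, :], axis=-1)
    delta = distances.std()
    similarity = np.exp(-distances ** 2 / (2. * delta ** 2))
    precomputed_labels = SpectralClustering(
        n_clusters=2, affinity='precomputed',
        random_state=0).fit(similarity).labels_
    return rbf_labels, precomputed_labels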
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/indexes/test_category.py | 3 | 41440 | # -*- coding: utf-8 -*-
import pytest
import pandas.util.testing as tm
from pandas.core.indexes.api import Index, CategoricalIndex
from .common import Base
from pandas.compat import range, PY3
import numpy as np
from pandas import Categorical, IntervalIndex, compat, notna
from pandas.util.testing import assert_almost_equal
import pandas.core.config as cf
import pandas as pd
if PY3:
unicode = lambda x: x
class TestCategoricalIndex(Base):
_holder = CategoricalIndex
def setup_method(self, method):
self.indices = dict(catIndex=tm.makeCategoricalIndex(100))
self.setup_indices()
def create_index(self, categories=None, ordered=False):
if categories is None:
categories = list('cab')
return CategoricalIndex(
list('aabbca'), categories=categories, ordered=ordered)
def test_construction(self):
ci = self.create_index(categories=list('abcd'))
categories = ci.categories
result = Index(ci)
tm.assert_index_equal(result, ci, exact=True)
assert not result.ordered
result = Index(ci.values)
tm.assert_index_equal(result, ci, exact=True)
assert not result.ordered
# empty
result = CategoricalIndex(categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes, np.array([], dtype='int8'))
assert not result.ordered
# passing categories
result = CategoricalIndex(list('aabbca'), categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
c = pd.Categorical(list('aabbca'))
result = CategoricalIndex(c)
tm.assert_index_equal(result.categories, Index(list('abc')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
assert not result.ordered
result = CategoricalIndex(c, categories=categories)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
assert not result.ordered
ci = CategoricalIndex(c, categories=list('abcd'))
result = CategoricalIndex(ci)
tm.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, 2, 0], dtype='int8'))
assert not result.ordered
result = CategoricalIndex(ci, categories=list('ab'))
tm.assert_index_equal(result.categories, Index(list('ab')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, -1, 0], dtype='int8'))
assert not result.ordered
result = CategoricalIndex(ci, categories=list('ab'), ordered=True)
tm.assert_index_equal(result.categories, Index(list('ab')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1,
1, -1, 0], dtype='int8'))
assert result.ordered
# turn me to an Index
result = Index(np.array(ci))
assert isinstance(result, Index)
assert not isinstance(result, CategoricalIndex)
def test_construction_with_dtype(self):
# specify dtype
ci = self.create_index(categories=list('abc'))
result = Index(np.array(ci), dtype='category')
tm.assert_index_equal(result, ci, exact=True)
result = Index(np.array(ci).tolist(), dtype='category')
tm.assert_index_equal(result, ci, exact=True)
# these are generally only equal when the categories are reordered
ci = self.create_index()
result = Index(
np.array(ci), dtype='category').reorder_categories(ci.categories)
tm.assert_index_equal(result, ci, exact=True)
# make sure indexes are handled
expected = CategoricalIndex([0, 1, 2], categories=[0, 1, 2],
ordered=True)
idx = Index(range(3))
result = CategoricalIndex(idx, categories=idx, ordered=True)
tm.assert_index_equal(result, expected, exact=True)
def test_create_categorical(self):
# https://github.com/pandas-dev/pandas/pull/17513
# The public CI constructor doesn't hit this code path with
# instances of CategoricalIndex, but we still want to test the code
ci = CategoricalIndex(['a', 'b', 'c'])
# First ci is self, second ci is data.
result = CategoricalIndex._create_categorical(ci, ci)
expected = Categorical(['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
def test_disallow_set_ops(self):
# GH 10039
# set ops (+/-) raise TypeError
idx = pd.Index(pd.Categorical(['a', 'b']))
pytest.raises(TypeError, lambda: idx - idx)
pytest.raises(TypeError, lambda: idx + idx)
pytest.raises(TypeError, lambda: idx - ['a', 'b'])
pytest.raises(TypeError, lambda: idx + ['a', 'b'])
pytest.raises(TypeError, lambda: ['a', 'b'] - idx)
pytest.raises(TypeError, lambda: ['a', 'b'] + idx)
def test_method_delegation(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.set_categories(list('cab'))
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cab')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.rename_categories(list('efg'))
tm.assert_index_equal(result, CategoricalIndex(
list('ffggef'), categories=list('efg')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.add_categories(['d'])
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cabd')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.remove_categories(['c'])
tm.assert_index_equal(result, CategoricalIndex(
list('aabb') + [np.nan] + ['a'], categories=list('ab')))
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.as_unordered()
tm.assert_index_equal(result, ci)
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.as_ordered()
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cabdef'), ordered=True))
# invalid
pytest.raises(ValueError, lambda: ci.set_categories(
list('cab'), inplace=True))
def test_contains(self):
ci = self.create_index(categories=list('cabdef'))
assert 'a' in ci
assert 'z' not in ci
assert 'e' not in ci
assert np.nan not in ci
# assert codes NOT in index
assert 0 not in ci
assert 1 not in ci
ci = CategoricalIndex(
list('aabbca') + [np.nan], categories=list('cabdef'))
assert np.nan in ci
def test_min_max(self):
ci = self.create_index(ordered=False)
pytest.raises(TypeError, lambda: ci.min())
pytest.raises(TypeError, lambda: ci.max())
ci = self.create_index(ordered=True)
assert ci.min() == 'c'
assert ci.max() == 'b'
def test_map(self):
ci = pd.CategoricalIndex(list('ABABC'), categories=list('CBA'),
ordered=True)
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(list('ababc'), categories=list('cba'),
ordered=True)
tm.assert_index_equal(result, exp)
ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
ordered=False, name='XXX')
result = ci.map(lambda x: x.lower())
exp = pd.CategoricalIndex(list('ababc'), categories=list('bac'),
ordered=False, name='XXX')
tm.assert_index_equal(result, exp)
# GH 12766: Return an index not an array
tm.assert_index_equal(ci.map(lambda x: 1),
Index(np.array([1] * 5, dtype=np.int64),
name='XXX'))
# change categories dtype
ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
ordered=False)
def f(x):
return {'A': 10, 'B': 20, 'C': 30}.get(x)
result = ci.map(f)
exp = pd.CategoricalIndex([10, 20, 10, 20, 30],
categories=[20, 10, 30],
ordered=False)
tm.assert_index_equal(result, exp)
def test_where(self):
i = self.create_index()
result = i.where(notna(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = pd.CategoricalIndex([np.nan, np.nan] + i[2:].tolist(),
categories=i.categories)
result = i.where(notna(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_where_array_like(self):
i = self.create_index()
cond = [False] + [True] * (len(i) - 1)
klasses = [list, tuple, np.array, pd.Series]
expected = pd.CategoricalIndex([np.nan] + i[1:].tolist(),
categories=i.categories)
for klass in klasses:
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_append(self):
ci = self.create_index()
categories = ci.categories
# append cats with the same categories
result = ci[:3].append(ci[3:])
tm.assert_index_equal(result, ci, exact=True)
foos = [ci[:1], ci[1:3], ci[3:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, ci, exact=True)
# empty
result = ci.append([])
tm.assert_index_equal(result, ci, exact=True)
        # appending with different or reordered categories is not ok
pytest.raises(
TypeError,
lambda: ci.append(ci.values.set_categories(list('abcd'))))
pytest.raises(
TypeError,
lambda: ci.append(ci.values.reorder_categories(list('abc'))))
# with objects
result = ci.append(Index(['c', 'a']))
expected = CategoricalIndex(list('aabbcaca'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# invalid objects
pytest.raises(TypeError, lambda: ci.append(Index(['a', 'd'])))
# GH14298 - if base object is not categorical -> coerce to object
result = Index(['c', 'a']).append(ci)
expected = Index(list('caaabbca'))
tm.assert_index_equal(result, expected, exact=True)
def test_insert(self):
ci = self.create_index()
categories = ci.categories
# test 0th element
result = ci.insert(0, 'a')
expected = CategoricalIndex(list('aaabbca'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# test Nth element that follows Python list behavior
result = ci.insert(-1, 'a')
expected = CategoricalIndex(list('aabbcaa'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# test empty
result = CategoricalIndex(categories=categories).insert(0, 'a')
expected = CategoricalIndex(['a'], categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# invalid
pytest.raises(TypeError, lambda: ci.insert(0, 'd'))
def test_delete(self):
ci = self.create_index()
categories = ci.categories
result = ci.delete(0)
expected = CategoricalIndex(list('abbca'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
result = ci.delete(-1)
expected = CategoricalIndex(list('aabbc'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
with pytest.raises((IndexError, ValueError)):
# Either depending on NumPy version
ci.delete(10)
def test_astype(self):
ci = self.create_index()
result = ci.astype('category')
tm.assert_index_equal(result, ci, exact=True)
result = ci.astype(object)
tm.assert_index_equal(result, Index(np.array(ci)))
# this IS equal, but not the same class
assert result.equals(ci)
assert isinstance(result, Index)
assert not isinstance(result, CategoricalIndex)
# interval
ii = IntervalIndex.from_arrays(left=[-0.001, 2.0],
right=[2, 4],
closed='right')
ci = CategoricalIndex(Categorical.from_codes(
[0, 1, -1], categories=ii, ordered=True))
result = ci.astype('interval')
expected = ii.take([0, 1, -1])
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(result.values)
tm.assert_index_equal(result, expected)
def test_reindex_base(self):
# Determined by cat ordering.
idx = CategoricalIndex(list("cab"), categories=list("cab"))
expected = np.arange(len(idx), dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with tm.assert_raises_regex(ValueError, "Invalid fill method"):
idx.get_indexer(idx, method="invalid")
def test_reindexing(self):
np.random.seed(123456789)
ci = self.create_index()
oidx = Index(np.array(ci))
for n in [1, 2, 5, len(ci)]:
finder = oidx[np.random.randint(0, len(ci), size=n)]
expected = oidx.get_indexer_non_unique(finder)[0]
actual = ci.get_indexer(finder)
tm.assert_numpy_array_equal(expected, actual)
# see gh-17323
#
# Even when indexer is equal to the
# members in the index, we should
# respect duplicates instead of taking
# the fast-track path.
for finder in [list("aabbca"), list("aababca")]:
expected = oidx.get_indexer_non_unique(finder)[0]
actual = ci.get_indexer(finder)
tm.assert_numpy_array_equal(expected, actual)
def test_reindex_dtype(self):
c = CategoricalIndex(['a', 'b', 'c', 'a'])
res, indexer = c.reindex(['a', 'c'])
tm.assert_index_equal(res, Index(['a', 'a', 'c']), exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(['a', 'b', 'c', 'a'])
res, indexer = c.reindex(Categorical(['a', 'c']))
exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(['a', 'b', 'c', 'a'],
categories=['a', 'b', 'c', 'd'])
res, indexer = c.reindex(['a', 'c'])
exp = Index(['a', 'a', 'c'], dtype='object')
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
c = CategoricalIndex(['a', 'b', 'c', 'a'],
categories=['a', 'b', 'c', 'd'])
res, indexer = c.reindex(Categorical(['a', 'c']))
exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.intp))
def test_reindex_empty_index(self):
# See GH16770
c = CategoricalIndex([])
res, indexer = c.reindex(['a', 'b'])
tm.assert_index_equal(res, Index(['a', 'b']), exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([-1, -1], dtype=np.intp))
def test_is_monotonic(self):
c = CategoricalIndex([1, 2, 3])
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
c = CategoricalIndex([1, 2, 3], ordered=True)
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1])
assert not c.is_monotonic_increasing
assert c.is_monotonic_decreasing
c = CategoricalIndex([1, 3, 2], categories=[3, 2, 1])
assert not c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
c = CategoricalIndex([1, 2, 3], categories=[3, 2, 1], ordered=True)
assert not c.is_monotonic_increasing
assert c.is_monotonic_decreasing
# non lexsorted categories
categories = [9, 0, 1, 2, 3]
c = CategoricalIndex([9, 0], categories=categories)
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
c = CategoricalIndex([0, 1], categories=categories)
assert c.is_monotonic_increasing
assert not c.is_monotonic_decreasing
def test_duplicates(self):
idx = CategoricalIndex([0, 0, 0], name='foo')
assert not idx.is_unique
assert idx.has_duplicates
expected = CategoricalIndex([0], name='foo')
tm.assert_index_equal(idx.drop_duplicates(), expected)
tm.assert_index_equal(idx.unique(), expected)
def test_get_indexer(self):
idx1 = CategoricalIndex(list('aabcde'), categories=list('edabc'))
idx2 = CategoricalIndex(list('abf'))
for indexer in [idx2, list('abf'), Index(list('abf'))]:
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='pad'))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='backfill'))
pytest.raises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='nearest'))
def test_get_loc(self):
# GH 12531
cidx1 = CategoricalIndex(list('abcde'), categories=list('edabc'))
idx1 = Index(list('abcde'))
assert cidx1.get_loc('a') == idx1.get_loc('a')
assert cidx1.get_loc('e') == idx1.get_loc('e')
for i in [cidx1, idx1]:
with pytest.raises(KeyError):
i.get_loc('NOT-EXIST')
# non-unique
cidx2 = CategoricalIndex(list('aacded'), categories=list('edabc'))
idx2 = Index(list('aacded'))
# results in bool array
res = cidx2.get_loc('d')
tm.assert_numpy_array_equal(res, idx2.get_loc('d'))
tm.assert_numpy_array_equal(res, np.array([False, False, False,
True, False, True]))
# unique element results in scalar
res = cidx2.get_loc('e')
assert res == idx2.get_loc('e')
assert res == 4
for i in [cidx2, idx2]:
with pytest.raises(KeyError):
i.get_loc('NOT-EXIST')
        # non-unique, sliceable
cidx3 = CategoricalIndex(list('aabbb'), categories=list('abc'))
idx3 = Index(list('aabbb'))
# results in slice
res = cidx3.get_loc('a')
assert res == idx3.get_loc('a')
assert res == slice(0, 2, None)
res = cidx3.get_loc('b')
assert res == idx3.get_loc('b')
assert res == slice(2, 5, None)
for i in [cidx3, idx3]:
with pytest.raises(KeyError):
i.get_loc('c')
def test_repr_roundtrip(self):
ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
str(ci)
tm.assert_index_equal(eval(repr(ci)), ci, exact=True)
# formatting
if PY3:
str(ci)
else:
compat.text_type(ci)
# long format
        # this is not round-trippable through repr
ci = CategoricalIndex(np.random.randint(0, 5, size=100))
if PY3:
str(ci)
else:
compat.text_type(ci)
def test_isin(self):
ci = CategoricalIndex(
list('aabca') + [np.nan], categories=['c', 'a', 'b'])
tm.assert_numpy_array_equal(
ci.isin(['c']),
np.array([False, False, False, True, False, False]))
tm.assert_numpy_array_equal(
ci.isin(['c', 'a', 'b']), np.array([True] * 5 + [False]))
tm.assert_numpy_array_equal(
ci.isin(['c', 'a', 'b', np.nan]), np.array([True] * 6))
# mismatched categorical -> coerced to ndarray so doesn't matter
result = ci.isin(ci.set_categories(list('abcdefghi')))
expected = np.array([True] * 6)
tm.assert_numpy_array_equal(result, expected)
result = ci.isin(ci.set_categories(list('defghi')))
expected = np.array([False] * 5 + [True])
tm.assert_numpy_array_equal(result, expected)
def test_identical(self):
ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
ordered=True)
assert ci1.identical(ci1)
assert ci1.identical(ci1.copy())
assert not ci1.identical(ci2)
def test_ensure_copied_data(self):
# gh-12309: Check the "copy" argument of each
# Index.__new__ is honored.
#
# Must be tested separately from other indexes because
# self.value is not an ndarray.
_base = lambda ar: ar if ar.base is None else ar.base
for index in self.indices.values():
result = CategoricalIndex(index.values, copy=True)
tm.assert_index_equal(index, result)
assert _base(index.values) is not _base(result.values)
result = CategoricalIndex(index.values, copy=False)
assert _base(index.values) is _base(result.values)
def test_equals_categorical(self):
ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
ordered=True)
assert ci1.equals(ci1)
assert not ci1.equals(ci2)
assert ci1.equals(ci1.astype(object))
assert ci1.astype(object).equals(ci1)
assert (ci1 == ci1).all()
assert not (ci1 != ci1).all()
assert not (ci1 > ci1).all()
assert not (ci1 < ci1).all()
assert (ci1 <= ci1).all()
assert (ci1 >= ci1).all()
assert not (ci1 == 1).all()
assert (ci1 == Index(['a', 'b'])).all()
assert (ci1 == ci1.values).all()
# invalid comparisons
with tm.assert_raises_regex(ValueError, "Lengths must match"):
ci1 == Index(['a', 'b', 'c'])
pytest.raises(TypeError, lambda: ci1 == ci2)
pytest.raises(
TypeError, lambda: ci1 == Categorical(ci1.values, ordered=False))
pytest.raises(
TypeError,
lambda: ci1 == Categorical(ci1.values, categories=list('abc')))
# tests
# make sure that we are testing for category inclusion properly
ci = CategoricalIndex(list('aabca'), categories=['c', 'a', 'b'])
assert not ci.equals(list('aabca'))
# Same categories, but different order
# Unordered
assert ci.equals(CategoricalIndex(list('aabca')))
# Ordered
assert not ci.equals(CategoricalIndex(list('aabca'), ordered=True))
assert ci.equals(ci.copy())
ci = CategoricalIndex(list('aabca') + [np.nan],
categories=['c', 'a', 'b'])
assert not ci.equals(list('aabca'))
assert not ci.equals(CategoricalIndex(list('aabca')))
assert ci.equals(ci.copy())
ci = CategoricalIndex(list('aabca') + [np.nan],
categories=['c', 'a', 'b'])
assert not ci.equals(list('aabca') + [np.nan])
assert ci.equals(CategoricalIndex(list('aabca') + [np.nan]))
assert not ci.equals(CategoricalIndex(list('aabca') + [np.nan],
ordered=True))
assert ci.equals(ci.copy())
def test_string_categorical_index_repr(self):
# short
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'])
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# multiple lines
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 10)
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc',
u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# truncated
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 100)
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc'],
categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)""" # noqa
assert unicode(idx) == expected
# larger categories
idx = pd.CategoricalIndex(list('abcdefghijklmmo'))
if PY3:
expected = u"""CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'm', 'o'],
categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j',
u'k', u'l', u'm', u'm', u'o'],
categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# short
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# multiple lines
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう',
u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# truncated
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ',
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert unicode(idx) == expected
# larger categories
idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
if PY3:
expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',
'す', 'せ', 'そ'],
categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', u'け', u'こ',
u'さ', u'し', u'す', u'せ', u'そ'],
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
        # Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
# short
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# multiple lines
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
# truncated
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ',
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)""" # noqa
assert unicode(idx) == expected
# larger categories
idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
if PY3:
expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',
'さ', 'し', 'す', 'せ', 'そ'],
categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')""" # noqa
assert repr(idx) == expected
else:
expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く',
u'け', u'こ', u'さ', u'し', u'す', u'せ', u'そ'],
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')""" # noqa
assert unicode(idx) == expected
def test_fillna_categorical(self):
# GH 11343
idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name='x')
# fill by value in categories
exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name='x')
tm.assert_index_equal(idx.fillna(1.0), exp)
# fill by value not in categories raises ValueError
with tm.assert_raises_regex(ValueError,
'fill value must be in categories'):
idx.fillna(2.0)
def test_take_fill_value(self):
# GH 12631
# numeric category
idx = pd.CategoricalIndex([1, 2, 3], name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3],
name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# object category
idx = pd.CategoricalIndex(list('CBA'), categories=list('ABC'),
ordered=True, name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
ordered=True, name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.CategoricalIndex(['B', 'C', np.nan],
categories=list('ABC'), ordered=True,
name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
ordered=True, name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_take_fill_value_datetime(self):
# datetime category
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx')
idx = pd.CategoricalIndex(idx)
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx')
expected = pd.CategoricalIndex(expected)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx')
exp_cats = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'])
expected = pd.CategoricalIndex(expected, categories=exp_cats)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx')
expected = pd.CategoricalIndex(expected)
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
idx = pd.CategoricalIndex([1, 2, 3], name='foo')
indices = [1, 0, -1]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
| apache-2.0 |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tools/tests/test_merge.py | 7 | 56114 | # pylint: disable=E1103
import nose
from datetime import datetime
from numpy.random import randn
from numpy import nan
import numpy as np
import random
import pandas as pd
from pandas.compat import lrange, lzip
from pandas.tools.merge import merge, concat, MergeError
from pandas.util.testing import (assert_frame_equal,
assert_series_equal,
slow)
from pandas import DataFrame, Index, MultiIndex, Series, Categorical
import pandas.util.testing as tm
N = 50
NGROUPS = 8
def get_test_data(ngroups=NGROUPS, n=N):
unique_groups = lrange(ngroups)
arr = np.asarray(np.tile(unique_groups, n // ngroups))
if len(arr) < n:
arr = np.asarray(list(arr) + unique_groups[:n - len(arr)])
random.shuffle(arr)
return arr
class TestMerge(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
# aggregate multiple columns
self.df = DataFrame({'key1': get_test_data(),
'key2': get_test_data(),
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
# exclude a couple keys for fun
self.df = self.df[self.df['key2'] > 1]
self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
'key2': get_test_data(ngroups=NGROUPS // 2,
n=N // 5),
'value': np.random.randn(N // 5)})
self.left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
self.right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
def test_merge_common(self):
joined = merge(self.df, self.df2)
exp = merge(self.df, self.df2, on=['key1', 'key2'])
tm.assert_frame_equal(joined, exp)
def test_merge_index_singlekey_right_vs_left(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=False)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=False)
assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
merged1 = merge(left, right, left_on='key',
right_index=True, how='left', sort=True)
merged2 = merge(right, left, right_on='key',
left_index=True, how='right', sort=True)
assert_frame_equal(merged1, merged2.ix[:, merged1.columns])
def test_merge_index_singlekey_inner(self):
left = DataFrame({'key': ['a', 'b', 'c', 'd', 'e', 'e', 'a'],
'v1': np.random.randn(7)})
right = DataFrame({'v2': np.random.randn(4)},
index=['d', 'b', 'c', 'a'])
# inner join
result = merge(left, right, left_on='key', right_index=True,
how='inner')
expected = left.join(right, on='key').ix[result.index]
assert_frame_equal(result, expected)
result = merge(right, left, right_on='key', left_index=True,
how='inner')
expected = left.join(right, on='key').ix[result.index]
assert_frame_equal(result, expected.ix[:, result.columns])
def test_merge_misspecified(self):
self.assertRaises(ValueError, merge, self.left, self.right,
left_index=True)
self.assertRaises(ValueError, merge, self.left, self.right,
right_index=True)
self.assertRaises(ValueError, merge, self.left, self.left,
left_on='key', on='key')
self.assertRaises(ValueError, merge, self.df, self.df2,
left_on=['key1'], right_on=['key1', 'key2'])
def test_index_and_on_parameters_confusion(self):
self.assertRaises(ValueError, merge, self.df, self.df2, how='left',
left_index=False, right_index=['key1', 'key2'])
self.assertRaises(ValueError, merge, self.df, self.df2, how='left',
left_index=['key1', 'key2'], right_index=False)
self.assertRaises(ValueError, merge, self.df, self.df2, how='left',
left_index=['key1', 'key2'],
right_index=['key1', 'key2'])
def test_merge_overlap(self):
merged = merge(self.left, self.left, on='key')
exp_len = (self.left['key'].value_counts() ** 2).sum()
self.assertEqual(len(merged), exp_len)
self.assertIn('v1_x', merged)
self.assertIn('v1_y', merged)
def test_merge_different_column_key_names(self):
left = DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'rkey': ['foo', 'bar', 'qux', 'foo'],
'value': [5, 6, 7, 8]})
merged = left.merge(right, left_on='lkey', right_on='rkey',
how='outer', sort=True)
exp = pd.Series(['bar', 'baz', 'foo', 'foo', 'foo', 'foo', np.nan],
name='lkey')
tm.assert_series_equal(merged['lkey'], exp)
exp = pd.Series(['bar', np.nan, 'foo', 'foo', 'foo', 'foo', 'qux'],
name='rkey')
tm.assert_series_equal(merged['rkey'], exp)
exp = pd.Series([2, 3, 1, 1, 4, 4, np.nan], name='value_x')
tm.assert_series_equal(merged['value_x'], exp)
exp = pd.Series([6, np.nan, 5, 8, 5, 8, 7], name='value_y')
tm.assert_series_equal(merged['value_y'], exp)
def test_merge_copy(self):
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
merged = merge(left, right, left_index=True,
right_index=True, copy=True)
merged['a'] = 6
self.assertTrue((left['a'] == 0).all())
merged['d'] = 'peekaboo'
self.assertTrue((right['d'] == 'bar').all())
def test_merge_nocopy(self):
left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
merged = merge(left, right, left_index=True,
right_index=True, copy=False)
merged['a'] = 6
self.assertTrue((left['a'] == 6).all())
merged['d'] = 'peekaboo'
self.assertTrue((right['d'] == 'peekaboo').all())
def test_intelligently_handle_join_key(self):
# #733, be a bit more 1337 about not returning unconsolidated DataFrame
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'key': [1, 1, 2, 3, 4, 5],
'rvalue': lrange(6)})
joined = merge(left, right, on='key', how='outer')
expected = DataFrame({'key': [1, 1, 1, 1, 2, 2, 3, 4, 5],
'value': np.array([0, 0, 1, 1, 2, 3, 4,
np.nan, np.nan]),
'rvalue': [0, 1, 0, 1, 2, 2, 3, 4, 5]},
columns=['value', 'key', 'rvalue'])
assert_frame_equal(joined, expected)
def test_merge_join_key_dtype_cast(self):
# #8596
df1 = DataFrame({'key': [1], 'v1': [10]})
df2 = DataFrame({'key': [2], 'v1': [20]})
df = merge(df1, df2, how='outer')
self.assertEqual(df['key'].dtype, 'int64')
df1 = DataFrame({'key': [True], 'v1': [1]})
df2 = DataFrame({'key': [False], 'v1': [0]})
df = merge(df1, df2, how='outer')
# GH13169
# this really should be bool
self.assertEqual(df['key'].dtype, 'object')
df1 = DataFrame({'val': [1]})
df2 = DataFrame({'val': [2]})
lkey = np.array([1])
rkey = np.array([2])
df = merge(df1, df2, left_on=lkey, right_on=rkey, how='outer')
self.assertEqual(df['key_0'].dtype, 'int64')
def test_handle_join_key_pass_array(self):
left = DataFrame({'key': [1, 1, 2, 2, 3],
'value': lrange(5)}, columns=['value', 'key'])
right = DataFrame({'rvalue': lrange(6)})
key = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on='key', right_on=key, how='outer')
merged2 = merge(right, left, left_on=key, right_on='key', how='outer')
assert_series_equal(merged['key'], merged2['key'])
self.assertTrue(merged['key'].notnull().all())
self.assertTrue(merged2['key'].notnull().all())
left = DataFrame({'value': lrange(5)}, columns=['value'])
right = DataFrame({'rvalue': lrange(6)})
lkey = np.array([1, 1, 2, 2, 3])
rkey = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on=lkey, right_on=rkey, how='outer')
self.assert_series_equal(merged['key_0'],
Series([1, 1, 1, 1, 2, 2, 3, 4, 5],
name='key_0'))
left = DataFrame({'value': lrange(3)})
right = DataFrame({'rvalue': lrange(6)})
key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64)
merged = merge(left, right, left_index=True, right_on=key, how='outer')
self.assert_series_equal(merged['key_0'], Series(key, name='key_0'))
def test_no_overlap_more_informative_error(self):
dt = datetime.now()
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
self.assertRaises(MergeError, merge, df1, df2)
def test_merge_non_unique_indexes(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
dt4 = datetime(2012, 5, 4)
df1 = DataFrame({'x': ['a']}, index=[dt])
df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt])
_check_merge(df1, df2)
# Not monotonic
df1 = DataFrame({'x': ['a', 'b', 'q']}, index=[dt2, dt, dt4])
df2 = DataFrame({'y': ['c', 'd', 'e', 'f', 'g', 'h']},
index=[dt3, dt3, dt2, dt2, dt, dt])
_check_merge(df1, df2)
df1 = DataFrame({'x': ['a', 'b']}, index=[dt, dt])
df2 = DataFrame({'y': ['c', 'd']}, index=[dt, dt])
_check_merge(df1, df2)
def test_merge_non_unique_index_many_to_many(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
df1 = DataFrame({'x': ['a', 'b', 'c', 'd']},
index=[dt2, dt2, dt, dt])
df2 = DataFrame({'y': ['e', 'f', 'g', ' h', 'i']},
index=[dt2, dt2, dt3, dt, dt])
_check_merge(df1, df2)
def test_left_merge_empty_dataframe(self):
left = DataFrame({'key': [1], 'value': [2]})
right = DataFrame({'key': []})
result = merge(left, right, on='key', how='left')
assert_frame_equal(result, left)
result = merge(right, left, on='key', how='right')
assert_frame_equal(result, left)
def test_merge_left_empty_right_empty(self):
# GH 10824
left = pd.DataFrame([], columns=['a', 'b', 'c'])
right = pd.DataFrame([], columns=['x', 'y', 'z'])
exp_in = pd.DataFrame([], columns=['a', 'b', 'c', 'x', 'y', 'z'],
index=pd.Index([], dtype=object),
dtype=object)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x'),
dict(left_on='a', right_index=True),
dict(left_on='a', right_on='x')]:
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp_in)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp_in)
def test_merge_left_empty_right_notempty(self):
# GH 10824
left = pd.DataFrame([], columns=['a', 'b', 'c'])
right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['x', 'y', 'z'])
exp_out = pd.DataFrame({'a': np.array([np.nan] * 3, dtype=object),
'b': np.array([np.nan] * 3, dtype=object),
'c': np.array([np.nan] * 3, dtype=object),
'x': [1, 4, 7],
'y': [2, 5, 8],
'z': [3, 6, 9]},
columns=['a', 'b', 'c', 'x', 'y', 'z'])
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
def check1(exp, kwarg):
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp)
def check2(exp, kwarg):
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x')]:
check1(exp_in, kwarg)
check2(exp_out, kwarg)
kwarg = dict(left_on='a', right_index=True)
check1(exp_in, kwarg)
exp_out['a'] = [0, 1, 2]
check2(exp_out, kwarg)
kwarg = dict(left_on='a', right_on='x')
check1(exp_in, kwarg)
exp_out['a'] = np.array([np.nan] * 3, dtype=object)
check2(exp_out, kwarg)
def test_merge_left_notempty_right_empty(self):
# GH 10824
left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['a', 'b', 'c'])
right = pd.DataFrame([], columns=['x', 'y', 'z'])
exp_out = pd.DataFrame({'a': [1, 4, 7],
'b': [2, 5, 8],
'c': [3, 6, 9],
'x': np.array([np.nan] * 3, dtype=object),
'y': np.array([np.nan] * 3, dtype=object),
'z': np.array([np.nan] * 3, dtype=object)},
columns=['a', 'b', 'c', 'x', 'y', 'z'])
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
def check1(exp, kwarg):
result = pd.merge(left, right, how='inner', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='right', **kwarg)
tm.assert_frame_equal(result, exp)
def check2(exp, kwarg):
result = pd.merge(left, right, how='left', **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how='outer', **kwarg)
tm.assert_frame_equal(result, exp)
for kwarg in [dict(left_index=True, right_index=True),
dict(left_index=True, right_on='x'),
dict(left_on='a', right_index=True),
dict(left_on='a', right_on='x')]:
check1(exp_in, kwarg)
check2(exp_out, kwarg)
def test_merge_nosort(self):
# #2098, anything to do?
from datetime import datetime
d = {"var1": np.random.randint(0, 10, size=10),
"var2": np.random.randint(0, 10, size=10),
"var3": [datetime(2012, 1, 12), datetime(2011, 2, 4),
datetime(
2010, 2, 3), datetime(2012, 1, 12),
datetime(
2011, 2, 4), datetime(2012, 4, 3),
datetime(
2012, 3, 4), datetime(2008, 5, 1),
datetime(2010, 2, 3), datetime(2012, 2, 3)]}
df = DataFrame.from_dict(d)
var3 = df.var3.unique()
var3.sort()
new = DataFrame.from_dict({"var3": var3,
"var8": np.random.random(7)})
result = df.merge(new, on="var3", sort=False)
exp = merge(df, new, on='var3', sort=False)
assert_frame_equal(result, exp)
self.assertTrue((df.var3.unique() == result.var3.unique()).all())
def test_merge_nan_right(self):
df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]})
df2 = DataFrame({"i1": [0], "i3": [0]})
result = df1.join(df2, on="i1", rsuffix="_")
expected = (DataFrame({'i1': {0: 0.0, 1: 1}, 'i2': {0: 0, 1: 1},
'i1_': {0: 0, 1: np.nan},
'i3': {0: 0.0, 1: np.nan},
None: {0: 0, 1: 0}})
.set_index(None)
.reset_index()[['i1', 'i2', 'i1_', 'i3']])
assert_frame_equal(result, expected, check_dtype=False)
df1 = DataFrame({"i1": [0, 1], "i2": [0.5, 1.5]})
df2 = DataFrame({"i1": [0], "i3": [0.7]})
result = df1.join(df2, rsuffix="_", on='i1')
expected = (DataFrame({'i1': {0: 0, 1: 1}, 'i1_': {0: 0.0, 1: nan},
'i2': {0: 0.5, 1: 1.5},
'i3': {0: 0.69999999999999996,
1: nan}})
[['i1', 'i2', 'i1_', 'i3']])
assert_frame_equal(result, expected)
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.df)
result = nad.merge(self.df2, on='key1')
tm.assertIsInstance(result, NotADataFrame)
def test_join_append_timedeltas(self):
import datetime as dt
from pandas import NaT
# timedelta64 issues with join/merge
# GH 5695
d = {'d': dt.datetime(2013, 11, 5, 5, 56), 't': dt.timedelta(0, 22500)}
df = DataFrame(columns=list('dt'))
df = df.append(d, ignore_index=True)
result = df.append(d, ignore_index=True)
expected = DataFrame({'d': [dt.datetime(2013, 11, 5, 5, 56),
dt.datetime(2013, 11, 5, 5, 56)],
't': [dt.timedelta(0, 22500),
dt.timedelta(0, 22500)]})
assert_frame_equal(result, expected)
td = np.timedelta64(300000000)
lhs = DataFrame(Series([td, td], index=["A", "B"]))
rhs = DataFrame(Series([td], index=["A"]))
result = lhs.join(rhs, rsuffix='r', how="left")
expected = DataFrame({'0': Series([td, td], index=list('AB')),
'0r': Series([td, NaT], index=list('AB'))})
assert_frame_equal(result, expected)
def test_other_datetime_unit(self):
# GH 13389
df1 = pd.DataFrame({'entity_id': [101, 102]})
s = pd.Series([None, None], index=[101, 102], name='days')
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
df2 = s.astype(dtype).to_frame('days')
            # coerces to datetime64[ns], thus should not be affected
self.assertEqual(df2['days'].dtype, 'datetime64[ns]')
result = df1.merge(df2, left_on='entity_id', right_index=True)
exp = pd.DataFrame({'entity_id': [101, 102],
'days': np.array(['nat', 'nat'],
dtype='datetime64[ns]')},
columns=['entity_id', 'days'])
tm.assert_frame_equal(result, exp)
def test_other_timedelta_unit(self):
# GH 13389
df1 = pd.DataFrame({'entity_id': [101, 102]})
s = pd.Series([None, None], index=[101, 102], name='days')
for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',
'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',
'timedelta64[ns]']:
df2 = s.astype(dtype).to_frame('days')
self.assertEqual(df2['days'].dtype, dtype)
result = df1.merge(df2, left_on='entity_id', right_index=True)
exp = pd.DataFrame({'entity_id': [101, 102],
'days': np.array(['nat', 'nat'],
dtype=dtype)},
columns=['entity_id', 'days'])
tm.assert_frame_equal(result, exp)
def test_overlapping_columns_error_message(self):
df = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df2 = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9]})
df.columns = ['key', 'foo', 'foo']
df2.columns = ['key', 'bar', 'bar']
expected = DataFrame({'key': [1, 2, 3],
'v1': [4, 5, 6],
'v2': [7, 8, 9],
'v3': [4, 5, 6],
'v4': [7, 8, 9]})
expected.columns = ['key', 'foo', 'foo', 'bar', 'bar']
assert_frame_equal(merge(df, df2), expected)
# #2649, #10639
df2.columns = ['key1', 'foo', 'foo']
self.assertRaises(ValueError, merge, df, df2)
def test_merge_on_datetime64tz(self):
# GH11405
left = pd.DataFrame({'key': pd.date_range('20151010', periods=2,
tz='US/Eastern'),
'value': [1, 2]})
right = pd.DataFrame({'key': pd.date_range('20151011', periods=3,
tz='US/Eastern'),
'value': [1, 2, 3]})
expected = DataFrame({'key': pd.date_range('20151010', periods=4,
tz='US/Eastern'),
'value_x': [1, 2, np.nan, np.nan],
'value_y': [np.nan, 1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
left = pd.DataFrame({'value': pd.date_range('20151010', periods=2,
tz='US/Eastern'),
'key': [1, 2]})
right = pd.DataFrame({'value': pd.date_range('20151011', periods=2,
tz='US/Eastern'),
'key': [2, 3]})
expected = DataFrame({
'value_x': list(pd.date_range('20151010', periods=2,
tz='US/Eastern')) + [pd.NaT],
'value_y': [pd.NaT] + list(pd.date_range('20151011', periods=2,
tz='US/Eastern')),
'key': [1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
self.assertEqual(result['value_x'].dtype, 'datetime64[ns, US/Eastern]')
self.assertEqual(result['value_y'].dtype, 'datetime64[ns, US/Eastern]')
def test_merge_on_periods(self):
left = pd.DataFrame({'key': pd.period_range('20151010', periods=2,
freq='D'),
'value': [1, 2]})
right = pd.DataFrame({'key': pd.period_range('20151011', periods=3,
freq='D'),
'value': [1, 2, 3]})
expected = DataFrame({'key': pd.period_range('20151010', periods=4,
freq='D'),
'value_x': [1, 2, np.nan, np.nan],
'value_y': [np.nan, 1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
left = pd.DataFrame({'value': pd.period_range('20151010', periods=2,
freq='D'),
'key': [1, 2]})
right = pd.DataFrame({'value': pd.period_range('20151011', periods=2,
freq='D'),
'key': [2, 3]})
exp_x = pd.period_range('20151010', periods=2, freq='D')
exp_y = pd.period_range('20151011', periods=2, freq='D')
expected = DataFrame({'value_x': list(exp_x) + [pd.NaT],
'value_y': [pd.NaT] + list(exp_y),
'key': [1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
assert_frame_equal(result, expected)
self.assertEqual(result['value_x'].dtype, 'object')
self.assertEqual(result['value_y'].dtype, 'object')
def test_indicator(self):
# PR #10054. xref #7412 and closes #8790.
df1 = DataFrame({'col1': [0, 1], 'col_left': [
'a', 'b'], 'col_conflict': [1, 2]})
df1_copy = df1.copy()
df2 = DataFrame({'col1': [1, 2, 3, 4, 5], 'col_right': [2, 2, 2, 2, 2],
'col_conflict': [1, 2, 3, 4, 5]})
df2_copy = df2.copy()
df_result = DataFrame({
'col1': [0, 1, 2, 3, 4, 5],
'col_conflict_x': [1, 2, np.nan, np.nan, np.nan, np.nan],
'col_left': ['a', 'b', np.nan, np.nan, np.nan, np.nan],
'col_conflict_y': [np.nan, 1, 2, 3, 4, 5],
'col_right': [np.nan, 2, 2, 2, 2, 2]})
df_result['_merge'] = Categorical(
['left_only', 'both', 'right_only',
'right_only', 'right_only', 'right_only'],
categories=['left_only', 'right_only', 'both'])
df_result = df_result[['col1', 'col_conflict_x', 'col_left',
'col_conflict_y', 'col_right', '_merge']]
test = merge(df1, df2, on='col1', how='outer', indicator=True)
assert_frame_equal(test, df_result)
test = df1.merge(df2, on='col1', how='outer', indicator=True)
assert_frame_equal(test, df_result)
# No side effects
assert_frame_equal(df1, df1_copy)
assert_frame_equal(df2, df2_copy)
# Check with custom name
df_result_custom_name = df_result
df_result_custom_name = df_result_custom_name.rename(
columns={'_merge': 'custom_name'})
test_custom_name = merge(
df1, df2, on='col1', how='outer', indicator='custom_name')
assert_frame_equal(test_custom_name, df_result_custom_name)
test_custom_name = df1.merge(
df2, on='col1', how='outer', indicator='custom_name')
assert_frame_equal(test_custom_name, df_result_custom_name)
# Check only accepts strings and booleans
with tm.assertRaises(ValueError):
merge(df1, df2, on='col1', how='outer', indicator=5)
with tm.assertRaises(ValueError):
df1.merge(df2, on='col1', how='outer', indicator=5)
# Check result integrity
test2 = merge(df1, df2, on='col1', how='left', indicator=True)
self.assertTrue((test2._merge != 'right_only').all())
test2 = df1.merge(df2, on='col1', how='left', indicator=True)
self.assertTrue((test2._merge != 'right_only').all())
test3 = merge(df1, df2, on='col1', how='right', indicator=True)
self.assertTrue((test3._merge != 'left_only').all())
test3 = df1.merge(df2, on='col1', how='right', indicator=True)
self.assertTrue((test3._merge != 'left_only').all())
test4 = merge(df1, df2, on='col1', how='inner', indicator=True)
self.assertTrue((test4._merge == 'both').all())
test4 = df1.merge(df2, on='col1', how='inner', indicator=True)
self.assertTrue((test4._merge == 'both').all())
# Check if working name in df
for i in ['_right_indicator', '_left_indicator', '_merge']:
df_badcolumn = DataFrame({'col1': [1, 2], i: [2, 2]})
with tm.assertRaises(ValueError):
merge(df1, df_badcolumn, on='col1',
how='outer', indicator=True)
with tm.assertRaises(ValueError):
df1.merge(df_badcolumn, on='col1', how='outer', indicator=True)
# Check for name conflict with custom name
df_badcolumn = DataFrame(
{'col1': [1, 2], 'custom_column_name': [2, 2]})
with tm.assertRaises(ValueError):
merge(df1, df_badcolumn, on='col1', how='outer',
indicator='custom_column_name')
with tm.assertRaises(ValueError):
df1.merge(df_badcolumn, on='col1', how='outer',
indicator='custom_column_name')
# Merge on multiple columns
df3 = DataFrame({'col1': [0, 1], 'col2': ['a', 'b']})
df4 = DataFrame({'col1': [1, 1, 3], 'col2': ['b', 'x', 'y']})
hand_coded_result = DataFrame({'col1': [0, 1, 1, 3],
'col2': ['a', 'b', 'x', 'y']})
hand_coded_result['_merge'] = Categorical(
['left_only', 'both', 'right_only', 'right_only'],
categories=['left_only', 'right_only', 'both'])
test5 = merge(df3, df4, on=['col1', 'col2'],
how='outer', indicator=True)
assert_frame_equal(test5, hand_coded_result)
test5 = df3.merge(df4, on=['col1', 'col2'],
how='outer', indicator=True)
assert_frame_equal(test5, hand_coded_result)
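# Helper: for each join type, DataFrame.join on the index should match merge() on the reset index.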
def _check_merge(x, y):
for how in ['inner', 'left', 'outer']:
result = x.join(y, how=how)
expected = merge(x.reset_index(), y.reset_index(), how=how,
sort=True)
expected = expected.set_index('index')
# TODO check_names on merge?
assert_frame_equal(result, expected, check_names=False)
class TestMergeMulti(tm.TestCase):
def setUp(self):
self.index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.to_join = DataFrame(np.random.randn(10, 3), index=self.index,
columns=['j_one', 'j_two', 'j_three'])
# a little relevant example with NAs
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
self.data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
def test_merge_on_multikey(self):
joined = self.data.join(self.to_join, on=['key1', 'key2'])
join_key = Index(lzip(self.data['key1'], self.data['key2']))
indexer = self.to_join.index.get_indexer(join_key)
ex_values = self.to_join.values.take(indexer, axis=0)
ex_values[indexer == -1] = np.nan
expected = self.data.join(DataFrame(ex_values,
columns=self.to_join.columns))
# TODO: columns aren't in the same order yet
assert_frame_equal(joined, expected.ix[:, joined.columns])
left = self.data.join(self.to_join, on=['key1', 'key2'], sort=True)
right = expected.ix[:, joined.columns].sort_values(['key1', 'key2'],
kind='mergesort')
assert_frame_equal(left, right)
def test_left_join_multi_index(self):
icols = ['1st', '2nd', '3rd']
def bind_cols(df):
iord = lambda a: 0 if a != a else ord(a)
f = lambda ts: ts.map(iord) - ord('a')
return (f(df['1st']) + f(df['3rd']) * 1e2 +
df['2nd'].fillna(0) * 1e4)
def run_asserts(left, right):
for sort in [False, True]:
res = left.join(right, on=icols, how='left', sort=sort)
self.assertTrue(len(left) < len(res) + 1)
self.assertFalse(res['4th'].isnull().any())
self.assertFalse(res['5th'].isnull().any())
tm.assert_series_equal(
res['4th'], - res['5th'], check_names=False)
result = bind_cols(res.iloc[:, :-2])
tm.assert_series_equal(res['4th'], result, check_names=False)
self.assertTrue(result.name is None)
if sort:
tm.assert_frame_equal(
res, res.sort_values(icols, kind='mergesort'))
out = merge(left, right.reset_index(), on=icols,
sort=sort, how='left')
res.index = np.arange(len(res))
tm.assert_frame_equal(out, res)
lc = list(map(chr, np.arange(ord('a'), ord('z') + 1)))
left = DataFrame(np.random.choice(lc, (5000, 2)),
columns=['1st', '3rd'])
left.insert(1, '2nd', np.random.randint(0, 1000, len(left)))
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
left['4th'] = bind_cols(left)
right['5th'] = - bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right)
# inject some nulls
left.loc[1::23, '1st'] = np.nan
left.loc[2::37, '2nd'] = np.nan
left.loc[3::43, '3rd'] = np.nan
left['4th'] = bind_cols(left)
i = np.random.permutation(len(left))
right = left.iloc[i, :-1]
right['5th'] = - bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right)
def test_merge_right_vs_left(self):
# compare left vs right merge with multikey
for sort in [False, True]:
merged1 = self.data.merge(self.to_join, left_on=['key1', 'key2'],
right_index=True, how='left', sort=sort)
merged2 = self.to_join.merge(self.data, right_on=['key1', 'key2'],
left_index=True, how='right',
sort=sort)
merged2 = merged2.ix[:, merged1.columns]
assert_frame_equal(merged1, merged2)
def test_compress_group_combinations(self):
# ~ 40000000 possible unique groups
key1 = tm.rands_array(10, 10000)
key1 = np.tile(key1, 2)
key2 = key1[::-1]
df = DataFrame({'key1': key1, 'key2': key2,
'value1': np.random.randn(20000)})
df2 = DataFrame({'key1': key1[::2], 'key2': key2[::2],
'value2': np.random.randn(10000)})
# just to hit the label compression code path
merge(df, df2, how='outer')
def test_left_join_index_preserve_order(self):
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24), dtype=np.int64)})
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(
result.sort_values(['k1', 'k2'], kind='mergesort'),
left.join(right, on=['k1', 'k2'], sort=True))
# test join with multi dtypes blocks
left = DataFrame({'k1': [0, 1, 2] * 8,
'k2': ['foo', 'bar'] * 12,
'k3': np.array([0, 1, 2] * 8, dtype=np.float32),
'v': np.array(np.arange(24), dtype=np.int32)})
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame({'v2': [5, 7]}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
expected['v2'] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(
result.sort_values(['k1', 'k2'], kind='mergesort'),
left.join(right, on=['k1', 'k2'], sort=True))
# do a right join for an extra test
joined = merge(right, left, left_index=True,
right_on=['k1', 'k2'], how='right')
tm.assert_frame_equal(joined.ix[:, expected.columns], expected)
def test_left_join_index_multi_match_multiindex(self):
left = DataFrame([
['X', 'Y', 'C', 'a'],
['W', 'Y', 'C', 'e'],
['V', 'Q', 'A', 'h'],
['V', 'R', 'D', 'i'],
['X', 'Y', 'D', 'b'],
['X', 'Y', 'A', 'c'],
['W', 'Q', 'B', 'f'],
['W', 'R', 'C', 'g'],
['V', 'Y', 'C', 'j'],
['X', 'Y', 'B', 'd']],
columns=['cola', 'colb', 'colc', 'tag'],
index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8])
right = DataFrame([
['W', 'R', 'C', 0],
['W', 'Q', 'B', 3],
['W', 'Q', 'B', 8],
['X', 'Y', 'A', 1],
['X', 'Y', 'A', 4],
['X', 'Y', 'B', 5],
['X', 'Y', 'C', 6],
['X', 'Y', 'C', 9],
['X', 'Q', 'C', -6],
['X', 'R', 'C', -9],
['V', 'Y', 'C', 7],
['V', 'R', 'D', 2],
['V', 'R', 'D', -1],
['V', 'Q', 'A', -3]],
columns=['col1', 'col2', 'col3', 'val'])
right.set_index(['col1', 'col2', 'col3'], inplace=True)
result = left.join(right, on=['cola', 'colb', 'colc'], how='left')
expected = DataFrame([
['X', 'Y', 'C', 'a', 6],
['X', 'Y', 'C', 'a', 9],
['W', 'Y', 'C', 'e', nan],
['V', 'Q', 'A', 'h', -3],
['V', 'R', 'D', 'i', 2],
['V', 'R', 'D', 'i', -1],
['X', 'Y', 'D', 'b', nan],
['X', 'Y', 'A', 'c', 1],
['X', 'Y', 'A', 'c', 4],
['W', 'Q', 'B', 'f', 3],
['W', 'Q', 'B', 'f', 8],
['W', 'R', 'C', 'g', 0],
['V', 'Y', 'C', 'j', 7],
['X', 'Y', 'B', 'd', 5]],
columns=['cola', 'colb', 'colc', 'tag', 'val'],
index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8])
tm.assert_frame_equal(result, expected)
result = left.join(right, on=['cola', 'colb', 'colc'],
how='left', sort=True)
tm.assert_frame_equal(
result,
expected.sort_values(['cola', 'colb', 'colc'], kind='mergesort'))
# GH7331 - maintain left frame order in left merge
right.reset_index(inplace=True)
right.columns = left.columns[:3].tolist() + right.columns[-1:].tolist()
result = merge(left, right, how='left', on=left.columns[:-1].tolist())
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_left_join_index_multi_match(self):
left = DataFrame([
['c', 0],
['b', 1],
['a', 2],
['b', 3]],
columns=['tag', 'val'],
index=[2, 0, 1, 3])
right = DataFrame([
['a', 'v'],
['c', 'w'],
['c', 'x'],
['d', 'y'],
['a', 'z'],
['c', 'r'],
['e', 'q'],
['c', 's']],
columns=['tag', 'char'])
right.set_index('tag', inplace=True)
result = left.join(right, on='tag', how='left')
expected = DataFrame([
['c', 0, 'w'],
['c', 0, 'x'],
['c', 0, 'r'],
['c', 0, 's'],
['b', 1, nan],
['a', 2, 'v'],
['a', 2, 'z'],
['b', 3, nan]],
columns=['tag', 'val', 'char'],
index=[2, 2, 2, 2, 0, 1, 1, 3])
tm.assert_frame_equal(result, expected)
result = left.join(right, on='tag', how='left', sort=True)
tm.assert_frame_equal(
result, expected.sort_values('tag', kind='mergesort'))
# GH7331 - maintain left frame order in left merge
result = merge(left, right.reset_index(), how='left', on='tag')
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_join_multi_dtypes(self):
# test with multi dtypes in the join index
def _test(dtype1, dtype2):
left = DataFrame({'k1': np.array([0, 1, 2] * 8, dtype=dtype1),
'k2': ['foo', 'bar'] * 12,
'v': np.array(np.arange(24), dtype=np.int64)})
index = MultiIndex.from_tuples([(2, 'bar'), (1, 'foo')])
right = DataFrame(
{'v2': np.array([5, 7], dtype=dtype2)}, index=index)
result = left.join(right, on=['k1', 'k2'])
expected = left.copy()
if dtype2.kind == 'i':
dtype2 = np.dtype('float64')
expected['v2'] = np.array(np.nan, dtype=dtype2)
expected.loc[(expected.k1 == 2) & (expected.k2 == 'bar'), 'v2'] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == 'foo'), 'v2'] = 7
tm.assert_frame_equal(result, expected)
result = left.join(right, on=['k1', 'k2'], sort=True)
expected.sort_values(['k1', 'k2'], kind='mergesort', inplace=True)
tm.assert_frame_equal(result, expected)
for d1 in [np.int64, np.int32, np.int16, np.int8, np.uint8]:
for d2 in [np.int64, np.float64, np.float32, np.float16]:
_test(np.dtype(d1), np.dtype(d2))
def test_left_merge_na_buglet(self):
left = DataFrame({'id': list('abcde'), 'v1': randn(5),
'v2': randn(5), 'dummy': list('abcde'),
'v3': randn(5)},
columns=['id', 'v1', 'v2', 'dummy', 'v3'])
right = DataFrame({'id': ['a', 'b', np.nan, np.nan, np.nan],
'sv3': [1.234, 5.678, np.nan, np.nan, np.nan]})
merged = merge(left, right, on='id', how='left')
rdf = right.drop(['id'], axis=1)
expected = left.join(rdf)
tm.assert_frame_equal(merged, expected)
def test_merge_na_keys(self):
data = [[1950, "A", 1.5],
[1950, "B", 1.5],
[1955, "B", 1.5],
[1960, "B", np.nan],
[1970, "B", 4.],
[1950, "C", 4.],
[1960, "C", np.nan],
[1965, "C", 3.],
[1970, "C", 4.]]
frame = DataFrame(data, columns=["year", "panel", "data"])
other_data = [[1960, 'A', np.nan],
[1970, 'A', np.nan],
[1955, 'A', np.nan],
[1965, 'A', np.nan],
[1965, 'B', np.nan],
[1955, 'C', np.nan]]
other = DataFrame(other_data, columns=['year', 'panel', 'data'])
result = frame.merge(other, how='outer')
expected = frame.fillna(-999).merge(other.fillna(-999), how='outer')
expected = expected.replace(-999, np.nan)
tm.assert_frame_equal(result, expected)
@slow
def test_int64_overflow_issues(self):
from itertools import product
from collections import defaultdict
from pandas.core.groupby import _int64_overflow_possible
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G1'])
df2 = DataFrame(np.random.randn(1000, 7),
columns=list('ABCDEF') + ['G2'])
# it works!
result = merge(df1, df2, how='outer')
self.assertTrue(len(result) == 2000)
low, high, n = -1 << 10, 1 << 10, 1 << 20
left = DataFrame(np.random.randint(low, high, (n, 7)),
columns=list('ABCDEFG'))
left['left'] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ['right']
right.index = np.arange(len(right))
right['right'] *= -1
out = merge(left, right, how='outer')
self.assertEqual(len(out), len(left))
assert_series_equal(out['left'], - out['right'], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
assert_series_equal(out['left'], result, check_names=False)
self.assertTrue(result.name is None)
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ['left', 'right', 'outer', 'inner']:
assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how='left', sort=False)
assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how='left', sort=False)
assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'),
columns=list('ABCDEFG'))
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
self.assertTrue(_int64_overflow_possible(shape))
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(np.random.randint(low, high, (n // 2, 7))
.astype('int64'),
columns=list('ABCDEFG'))
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left['left'] = np.random.randn(len(left))
right['right'] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list('ABCDEFG')).iterrows():
ldict[idx].append(row['left'])
for idx, row in right.set_index(list('ABCDEFG')).iterrows():
rdict[idx].append(row['right'])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(k + tuple([lv, rv]))
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(k + tuple([np.nan, rv]))
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list('ABCDEFG')
assert_frame_equal(df[kcols].copy(),
df[kcols].sort_values(kcols, kind='mergesort'))
out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right'])
out = align(out)
jmask = {'left': out['left'].notnull(),
'right': out['right'].notnull(),
'inner': out['left'].notnull() & out['right'].notnull(),
'outer': np.ones(len(out), dtype='bool')}
for how in 'left', 'right', 'outer', 'inner':
mask = jmask[how]
frame = align(out[mask].copy())
self.assertTrue(mask.all() ^ mask.any() or how == 'outer')
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
assert_frame_equal(frame, align(res),
check_dtype=how not in ('right', 'outer'))
def test_join_multi_levels(self):
# GH 3662
# merge multi-levels
household = (
DataFrame(
dict(household_id=[1, 2, 3],
male=[0, 1, 0],
wealth=[196087.3, 316478.7, 294750]),
columns=['household_id', 'male', 'wealth'])
.set_index('household_id'))
portfolio = (
DataFrame(
dict(household_id=[1, 2, 2, 3, 3, 3, 4],
asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29",
"gb00b03mlx29", "lu0197800237", "nl0000289965",
np.nan],
name=["ABN Amro", "Robeco", "Royal Dutch Shell",
"Royal Dutch Shell",
"AAB Eastern Europe Equity Fund",
"Postbank BioTech Fonds", np.nan],
share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),
columns=['household_id', 'asset_id', 'name', 'share'])
.set_index(['household_id', 'asset_id']))
result = household.join(portfolio, how='inner')
expected = (
DataFrame(
dict(male=[0, 1, 1, 0, 0, 0],
wealth=[196087.3, 316478.7, 316478.7,
294750.0, 294750.0, 294750.0],
name=['ABN Amro', 'Robeco', 'Royal Dutch Shell',
'Royal Dutch Shell',
'AAB Eastern Europe Equity Fund',
'Postbank BioTech Fonds'],
share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25],
household_id=[1, 2, 2, 3, 3, 3],
asset_id=['nl0000301109', 'nl0000289783', 'gb00b03mlx29',
'gb00b03mlx29', 'lu0197800237',
'nl0000289965']))
.set_index(['household_id', 'asset_id'])
.reindex(columns=['male', 'wealth', 'name', 'share']))
assert_frame_equal(result, expected)
assert_frame_equal(result, expected)
# equivalency
result2 = (merge(household.reset_index(), portfolio.reset_index(),
on=['household_id'], how='inner')
.set_index(['household_id', 'asset_id']))
assert_frame_equal(result2, expected)
result = household.join(portfolio, how='outer')
expected = (concat([
expected,
(DataFrame(
dict(share=[1.00]),
index=MultiIndex.from_tuples(
[(4, np.nan)],
names=['household_id', 'asset_id'])))
], axis=0).reindex(columns=expected.columns))
assert_frame_equal(result, expected)
# invalid cases
household.index.name = 'foo'
def f():
household.join(portfolio, how='inner')
self.assertRaises(ValueError, f)
portfolio2 = portfolio.copy()
portfolio2.index.set_names(['household_id', 'foo'])
def f():
portfolio2.join(portfolio, how='inner')
self.assertRaises(ValueError, f)
def test_join_multi_levels2(self):
# some more advanced merges
# GH6360
household = (
DataFrame(
dict(household_id=[1, 2, 2, 3, 3, 3, 4],
asset_id=["nl0000301109", "nl0000301109", "gb00b03mlx29",
"gb00b03mlx29", "lu0197800237", "nl0000289965",
np.nan],
share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0]),
columns=['household_id', 'asset_id', 'share'])
.set_index(['household_id', 'asset_id']))
log_return = DataFrame(dict(
asset_id=["gb00b03mlx29", "gb00b03mlx29",
"gb00b03mlx29", "lu0197800237", "lu0197800237"],
t=[233, 234, 235, 180, 181],
log_return=[.09604978, -.06524096, .03532373, .03025441, .036997]
)).set_index(["asset_id", "t"])
expected = (
DataFrame(dict(
household_id=[2, 2, 2, 3, 3, 3, 3, 3],
asset_id=["gb00b03mlx29", "gb00b03mlx29",
"gb00b03mlx29", "gb00b03mlx29",
"gb00b03mlx29", "gb00b03mlx29",
"lu0197800237", "lu0197800237"],
t=[233, 234, 235, 233, 234, 235, 180, 181],
share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6],
log_return=[.09604978, -.06524096, .03532373,
.09604978, -.06524096, .03532373,
.03025441, .036997]
))
.set_index(["household_id", "asset_id", "t"])
.reindex(columns=['share', 'log_return']))
def f():
household.join(log_return, how='inner')
self.assertRaises(NotImplementedError, f)
# this is the equivalency
result = (merge(household.reset_index(), log_return.reset_index(),
on=['asset_id'], how='inner')
.set_index(['household_id', 'asset_id', 't']))
assert_frame_equal(result, expected)
expected = (
DataFrame(dict(
household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4],
asset_id=["nl0000301109", "nl0000289783", "gb00b03mlx29",
"gb00b03mlx29", "gb00b03mlx29",
"gb00b03mlx29", "gb00b03mlx29", "gb00b03mlx29",
"lu0197800237", "lu0197800237",
"nl0000289965", None],
t=[None, None, 233, 234, 235, 233, 234,
235, 180, 181, None, None],
share=[1.0, 0.4, 0.6, 0.6, 0.6, 0.15,
0.15, 0.15, 0.6, 0.6, 0.25, 1.0],
log_return=[None, None, .09604978, -.06524096, .03532373,
.09604978, -.06524096, .03532373,
.03025441, .036997, None, None]
))
.set_index(["household_id", "asset_id", "t"]))
def f():
household.join(log_return, how='outer')
self.assertRaises(NotImplementedError, f)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
elmadjian/pcs5735 | aula1/logistic_regression.py | 1 | 3369 | #Author: Carlos Eduardo Leão Elmadjian
import sys
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
theta_list = []
x_list = []
y_list = []
def main():
if len(sys.argv) != 2:
print("modo de usar: <este_programa> <arquivo_csv>")
sys.exit()
csv_file = sys.argv[1]
with open(csv_file, "r") as arquivo:
classes = arquivo.readline().split(",")
theta_list = [0.0 for i in range(len(classes))]
for line in arquivo:
values = line.split(",")
curr_x = [float(i) for i in values[:-1]]
curr_x.append(1.0)
x_list.append(curr_x)
            y_list.append(1.0 if values[-1].startswith("yes") else 0.0)
logistic_regression(theta_list, x_list, y_list, 0.0005, 0.0000001)
plot(theta_list, x_list, y_list)
#The logistic regression algorithm using SGD
#-------------------------------------------
def logistic_regression(theta_list, x_list, y_list, alpha, epsilon):
J_prev = 0
J_curr = J(theta_list, x_list, y_list)
count = 0
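    #run SGD sweeps until the improvement in J drops below epsilon (or 10000 sweeps are reached)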
while abs(J_curr - J_prev) > epsilon:
if count == 10000:
print("too much iterations")
break
count += 1
for j in range(len(theta_list)):
for i in range(len(x_list)):
diff = (h_theta(theta_list, x_list[i]) - y_list[i])
theta_list[j] = theta_list[j] - alpha * diff * x_list[i][j]
J_prev = J_curr
J_curr = J(theta_list, x_list, y_list)
#Calculates the cost function J (half the sum of squared errors over the data)
#------------------------------------
def J(theta_list, x_list, y_list):
sigma = 0
for i in range(len(x_list)):
sigma += (h_theta(theta_list, x_list[i]) - y_list[i])**2
return sigma / 2
#Calculates h_theta
#-------------------
def h_theta(theta, x):
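    #sigmoid hypothesis: g(theta . x) = 1 / (1 + exp(-theta . x))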
return 1.0/(1.0 + np.exp(-np.dot(theta, x)))
#Binary classifier
#------------------
def predict(theta, x, y):
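    #Bernoulli likelihood of label y under the current model; plot() counts values >= 0.5 as correct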
return (h_theta(theta, x)**y) * ((1.0-h_theta(theta, x))**(1.0-y))
#DEBUG: Plot our findings
#------------------------
def plot(theta_list, x_list, y_list):
new_x_list = [i[0] for i in x_list]
new_y_list = [i[1] for i in x_list]
hit, p1, p2, p3, p4 = 0, 0, 0, 0, 0
for i in range(len(y_list)):
if y_list[i] == 1.0:
if predict(theta_list, x_list[i], y_list[i]) >= 0.5:
p1, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'go')
hit += 1
else:
p2, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'gx')
elif y_list[i] == 0.0 :
if predict(theta_list, x_list[i], y_list[i]) >= 0.5:
p3, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'ro')
hit += 1
else:
p4, = plt.plot(np.dot(theta_list, x_list[i]), y_list[i], 'rx')
plt.title("Regressão logística sobre os dados de 'students.csv'")
plt.xlabel("z")
plt.ylabel("g(z)")
hit_true = 'P(y=admitido) = admitido'
hit_false = 'P(y=admitido) = não admitido'
miss_true = 'P(y=não admitido) = não admitido'
miss_false ='P(y=não admitido) = admitido'
plt.legend([p1,p2,p3,p4],[hit_true, hit_false, miss_true, miss_false])
print("hit rate:", hit/len(y_list))
plt.show()
#-----------------------
if __name__=="__main__":
main()
| mpl-2.0 |
jalexvig/tensorflow | tensorflow/python/estimator/inputs/inputs.py | 20 | 1086 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods to create simple input_fns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.python.estimator.inputs.numpy_io import numpy_input_fn
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn
# pylint: enable=unused-import,line-too-long
| apache-2.0 |
blaze/dask | dask/dataframe/io/tests/test_sql.py | 1 | 15033 | from contextlib import contextmanager
import io
import pytest
# import dask
from dask.dataframe.io.sql import read_sql_table
from dask.dataframe.utils import assert_eq, PANDAS_GT_0240
from dask.utils import tmpfile
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
pytest.importorskip("sqlalchemy")
pytest.importorskip("sqlite3")
np = pytest.importorskip("numpy")
data = """
name,number,age,negish
Alice,0,33,-5
Bob,1,40,-3
Chris,2,22,3
Dora,3,16,5
Edith,4,53,0
Francis,5,30,0
Garreth,6,20,0
"""
df = pd.read_csv(io.StringIO(data), index_col="number")
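# Fixture: writes the sample frame above to a temporary SQLite database and yields its sqlite:/// URI.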
@pytest.yield_fixture
def db():
with tmpfile() as f:
uri = "sqlite:///%s" % f
df.to_sql("test", uri, index=True, if_exists="replace")
yield uri
def test_empty(db):
from sqlalchemy import create_engine, MetaData, Table, Column, Integer
with tmpfile() as f:
uri = "sqlite:///%s" % f
metadata = MetaData()
engine = create_engine(uri)
table = Table(
"empty_table",
metadata,
Column("id", Integer, primary_key=True),
Column("col2", Integer),
)
metadata.create_all(engine)
dask_df = read_sql_table(table.name, uri, index_col="id", npartitions=1)
assert dask_df.index.name == "id"
assert dask_df.col2.dtype == np.dtype("int64")
pd_dataframe = dask_df.compute()
assert pd_dataframe.empty is True
def test_passing_engine_as_uri_raises_helpful_error(db):
# https://github.com/dask/dask/issues/6473
from sqlalchemy import create_engine
df = pd.DataFrame([{"i": i, "s": str(i) * 2} for i in range(4)])
ddf = dd.from_pandas(df, npartitions=2)
with tmpfile() as f:
db = "sqlite:///%s" % f
engine = create_engine(db)
with pytest.raises(ValueError, match="Expected URI to be a string"):
ddf.to_sql("test", engine, if_exists="replace")
@pytest.mark.skip(
reason="Requires a postgres server. Sqlite does not support multiple schemas."
)
def test_empty_other_schema():
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, event, DDL
# Database configurations.
pg_host = "localhost"
pg_port = "5432"
pg_user = "user"
pg_pass = "pass"
pg_db = "db"
db_url = "postgresql://%s:%s@%s:%s/%s" % (pg_user, pg_pass, pg_host, pg_port, pg_db)
# Create an empty table in a different schema.
table_name = "empty_table"
schema_name = "other_schema"
engine = create_engine(db_url)
metadata = MetaData()
table = Table(
table_name,
metadata,
Column("id", Integer, primary_key=True),
Column("col2", Integer),
schema=schema_name,
)
# Create the schema and the table.
event.listen(
metadata, "before_create", DDL("CREATE SCHEMA IF NOT EXISTS %s" % schema_name)
)
metadata.create_all(engine)
# Read the empty table from the other schema.
dask_df = read_sql_table(
table.name, db_url, index_col="id", schema=table.schema, npartitions=1
)
# Validate that the retrieved table is empty.
assert dask_df.index.name == "id"
assert dask_df.col2.dtype == np.dtype("int64")
pd_dataframe = dask_df.compute()
assert pd_dataframe.empty is True
# Drop the schema and the table.
engine.execute("DROP SCHEMA IF EXISTS %s CASCADE" % schema_name)
def test_needs_rational(db):
import datetime
now = datetime.datetime.now()
d = datetime.timedelta(seconds=1)
df = pd.DataFrame(
{
"a": list("ghjkl"),
"b": [now + i * d for i in range(5)],
"c": [True, True, False, True, True],
}
)
df = df.append(
[
{"a": "x", "b": now + d * 1000, "c": None},
{"a": None, "b": now + d * 1001, "c": None},
]
)
with tmpfile() as f:
uri = "sqlite:///%s" % f
df.to_sql("test", uri, index=False, if_exists="replace")
# one partition contains NULL
data = read_sql_table("test", uri, npartitions=2, index_col="b")
df2 = df.set_index("b")
assert_eq(data, df2.astype({"c": bool})) # bools are coerced
# one partition contains NULL, but big enough head
data = read_sql_table("test", uri, npartitions=2, index_col="b", head_rows=12)
df2 = df.set_index("b")
assert_eq(data, df2)
# empty partitions
data = read_sql_table("test", uri, npartitions=20, index_col="b")
part = data.get_partition(12).compute()
assert part.dtypes.tolist() == ["O", bool]
assert part.empty
df2 = df.set_index("b")
assert_eq(data, df2.astype({"c": bool}))
# explicit meta
data = read_sql_table("test", uri, npartitions=2, index_col="b", meta=df2[:0])
part = data.get_partition(1).compute()
assert part.dtypes.tolist() == ["O", "O"]
df2 = df.set_index("b")
assert_eq(data, df2)
def test_simple(db):
# single chunk
data = read_sql_table("test", db, npartitions=2, index_col="number").compute()
assert (data.name == df.name).all()
assert data.index.name == "number"
assert_eq(data, df)
def test_npartitions(db):
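    # npartitions and bytes_per_chunk are alternative ways of controlling how the table is split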
data = read_sql_table(
"test", db, columns=list(df.columns), npartitions=2, index_col="number"
)
assert len(data.divisions) == 3
assert (data.name.compute() == df.name).all()
data = read_sql_table(
"test", db, columns=["name"], npartitions=6, index_col="number"
)
assert_eq(data, df[["name"]])
data = read_sql_table(
"test",
db,
columns=list(df.columns),
bytes_per_chunk="2 GiB",
index_col="number",
)
assert data.npartitions == 1
assert (data.name.compute() == df.name).all()
data_1 = read_sql_table(
"test",
db,
columns=list(df.columns),
bytes_per_chunk=2 ** 30,
index_col="number",
head_rows=1,
)
assert data_1.npartitions == 1
assert (data_1.name.compute() == df.name).all()
data = read_sql_table(
"test",
db,
columns=list(df.columns),
bytes_per_chunk=250,
index_col="number",
head_rows=1,
)
assert data.npartitions == 2
def test_divisions(db):
data = read_sql_table(
"test", db, columns=["name"], divisions=[0, 2, 4], index_col="number"
)
assert data.divisions == (0, 2, 4)
assert data.index.max().compute() == 4
assert_eq(data, df[["name"]][df.index <= 4])
def test_division_or_partition(db):
with pytest.raises(TypeError):
read_sql_table(
"test",
db,
columns=["name"],
index_col="number",
divisions=[0, 2, 4],
npartitions=3,
)
out = read_sql_table("test", db, index_col="number", bytes_per_chunk=100)
m = out.map_partitions(
lambda d: d.memory_usage(deep=True, index=True).sum()
).compute()
assert (50 < m).all() and (m < 200).all()
assert_eq(out, df)
def test_meta(db):
data = read_sql_table(
"test", db, index_col="number", meta=dd.from_pandas(df, npartitions=1)
).compute()
assert (data.name == df.name).all()
assert data.index.name == "number"
assert_eq(data, df)
def test_meta_no_head_rows(db):
data = read_sql_table(
"test",
db,
index_col="number",
meta=dd.from_pandas(df, npartitions=1),
npartitions=2,
head_rows=0,
)
assert len(data.divisions) == 3
data = data.compute()
assert (data.name == df.name).all()
assert data.index.name == "number"
assert_eq(data, df)
data = read_sql_table(
"test",
db,
index_col="number",
meta=dd.from_pandas(df, npartitions=1),
divisions=[0, 3, 6],
head_rows=0,
)
assert len(data.divisions) == 3
data = data.compute()
assert (data.name == df.name).all()
assert data.index.name == "number"
assert_eq(data, df)
def test_no_meta_no_head_rows(db):
with pytest.raises(ValueError):
read_sql_table("test", db, index_col="number", head_rows=0, npartitions=1)
def test_range(db):
data = read_sql_table("test", db, npartitions=2, index_col="number", limits=[1, 4])
assert data.index.min().compute() == 1
assert data.index.max().compute() == 4
def test_datetimes():
import datetime
now = datetime.datetime.now()
d = datetime.timedelta(seconds=1)
df = pd.DataFrame(
{"a": list("ghjkl"), "b": [now + i * d for i in range(2, -3, -1)]}
)
with tmpfile() as f:
uri = "sqlite:///%s" % f
df.to_sql("test", uri, index=False, if_exists="replace")
data = read_sql_table("test", uri, npartitions=2, index_col="b")
assert data.index.dtype.kind == "M"
assert data.divisions[0] == df.b.min()
df2 = df.set_index("b")
assert_eq(data.map_partitions(lambda x: x.sort_index()), df2.sort_index())
def test_with_func(db):
from sqlalchemy import sql
index = sql.func.abs(sql.column("negish")).label("abs")
# function for the index, get all columns
data = read_sql_table("test", db, npartitions=2, index_col=index)
assert data.divisions[0] == 0
part = data.get_partition(0).compute()
assert (part.index == 0).all()
# now an arith op for one column too; it's name will be 'age'
data = read_sql_table(
"test",
db,
npartitions=2,
index_col=index,
columns=[index, -(sql.column("age"))],
)
assert (data.age.compute() < 0).all()
# a column that would have no name, give it a label
index = (-(sql.column("negish"))).label("index")
data = read_sql_table(
"test", db, npartitions=2, index_col=index, columns=["negish", "age"]
)
d = data.compute()
assert (-d.index == d["negish"]).all()
def test_no_nameless_index(db):
from sqlalchemy import sql
index = -(sql.column("negish"))
with pytest.raises(ValueError):
read_sql_table(
"test", db, npartitions=2, index_col=index, columns=["negish", "age", index]
)
index = sql.func.abs(sql.column("negish"))
# function for the index, get all columns
with pytest.raises(ValueError):
read_sql_table("test", db, npartitions=2, index_col=index)
def test_select_from_select(db):
from sqlalchemy import sql
s1 = sql.select([sql.column("number"), sql.column("name")]).select_from(
sql.table("test")
)
out = read_sql_table(s1, db, npartitions=2, index_col="number")
assert_eq(out, df[["name"]])
def test_extra_connection_engine_keywords(capsys, db):
data = read_sql_table(
"test", db, npartitions=2, index_col="number", engine_kwargs={"echo": False}
).compute()
# no captured message from the stdout with the echo=False parameter (this is the default)
out, err = capsys.readouterr()
assert "SELECT" not in out
assert_eq(data, df)
# with the echo=True sqlalchemy parameter, you should get all SQL queries in the stdout
data = read_sql_table(
"test", db, npartitions=2, index_col="number", engine_kwargs={"echo": True}
).compute()
out, err = capsys.readouterr()
assert "WHERE test.number >= ? AND test.number < ?" in out
assert "WHERE test.number >= ? AND test.number <= ?" in out
assert_eq(data, df)
def test_no_character_index_without_divisions(db):
# attempt to read the sql table with a character index and no divisions
with pytest.raises(TypeError):
read_sql_table("test", db, npartitions=2, index_col="name", divisions=None)
@contextmanager
def tmp_db_uri():
with tmpfile() as f:
yield "sqlite:///%s" % f
@pytest.mark.parametrize("npartitions", (1, 2))
@pytest.mark.parametrize("parallel", (False, True))
def test_to_sql(npartitions, parallel):
df_by_age = df.set_index("age")
df_appended = pd.concat(
[
df,
df,
]
)
ddf = dd.from_pandas(df, npartitions)
ddf_by_age = ddf.set_index("age")
# Simple round trip test: use existing "number" index_col
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, parallel=parallel)
result = read_sql_table("test", uri, "number")
assert_eq(df, result)
# Test writing no index, and reading back in with one of the other columns as index (`read_sql_table` requires
# an index_col)
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, parallel=parallel, index=False)
result = read_sql_table("test", uri, "negish")
assert_eq(df.set_index("negish"), result)
result = read_sql_table("test", uri, "age")
assert_eq(df_by_age, result)
# Index by "age" instead
with tmp_db_uri() as uri:
ddf_by_age.to_sql("test", uri, parallel=parallel)
result = read_sql_table("test", uri, "age")
assert_eq(df_by_age, result)
# Index column can't have "object" dtype if no partitions are provided
with tmp_db_uri() as uri:
ddf.set_index("name").to_sql("test", uri)
with pytest.raises(
TypeError,
match='Provided index column is of type "object". If divisions is not provided the index column type must be numeric or datetime.', # noqa: E501
):
read_sql_table("test", uri, "name")
# Test various "if_exists" values
with tmp_db_uri() as uri:
ddf.to_sql("test", uri)
# Writing a table that already exists fails
with pytest.raises(ValueError, match="Table 'test' already exists"):
ddf.to_sql("test", uri)
ddf.to_sql("test", uri, parallel=parallel, if_exists="append")
result = read_sql_table("test", uri, "number")
assert_eq(df_appended, result)
ddf_by_age.to_sql("test", uri, parallel=parallel, if_exists="replace")
result = read_sql_table("test", uri, "age")
assert_eq(df_by_age, result)
# Verify number of partitions returned, when compute=False
with tmp_db_uri() as uri:
result = ddf.to_sql("test", uri, parallel=parallel, compute=False)
# the first result is from the "meta" insert
actual = len(result.compute())
assert actual == npartitions
def test_to_sql_kwargs():
ddf = dd.from_pandas(df, 2)
with tmp_db_uri() as uri:
# "method" keyword is allowed iff pandas>=0.24.0
if PANDAS_GT_0240:
ddf.to_sql("test", uri, method="multi")
else:
with pytest.raises(
NotImplementedError,
match=r"'method' requires pandas>=0.24.0. You have version 0.23.\d",
):
ddf.to_sql("test", uri, method="multi")
# Other, unknown keywords always disallowed
with pytest.raises(
TypeError, match="to_sql\\(\\) got an unexpected keyword argument 'unknown'"
):
ddf.to_sql("test", uri, unknown=None)
| bsd-3-clause |
ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pydocstyle/src/tests/test_cases/canonical_numpy_examples.py | 3 | 5315 | """This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
# Example source file from the official "numpydoc docstring guide"
# documentation (with the modification of commenting out all the original
# ``import`` lines, plus adding this note and ``Expectation`` code):
# * As HTML: https://numpydoc.readthedocs.io/en/latest/example.html
# * Source Python:
# https://github.com/numpy/numpydoc/blob/master/doc/example.py
# from __future__ import division, absolute_import, print_function
#
# import os # standard library imports first
#
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
#
# import numpy as np
# import matplotlib as mpl
# import matplotlib.pyplot as plt
#
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
import os
from .expected import Expectation
expectation = Expectation()
expect = expectation.expect
# module docstring expected violations:
expectation.expected.add((
os.path.normcase(__file__),
"D205: 1 blank line required between summary line and description "
"(found 0)"))
expectation.expected.add((
os.path.normcase(__file__),
"D213: Multi-line docstring summary should start at the second line"))
expectation.expected.add((
os.path.normcase(__file__),
"D400: First line should end with a period (not 'd')"))
expectation.expected.add((
os.path.normcase(__file__),
"D404: First word of the docstring should not be `This`"))
expectation.expected.add((
os.path.normcase(__file__),
"D415: First line should end with a period, question mark, or exclamation "
"point (not 'd')"))
@expect("D213: Multi-line docstring summary should start at the second line",
arg_count=3)
@expect("D401: First line should be in imperative mood; try rephrasing "
"(found 'A')", arg_count=3)
@expect("D413: Missing blank line after last section ('Examples')",
arg_count=3)
def foo(var1, var2, long_var_name='hi'):
r"""A one-line summary that does not use variable names.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
type_without_description
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
numpy.array : Relationship (optional).
numpy.ndarray : Relationship (optional), which could be fairly long, in
which case the line wraps here.
numpy.dot, numpy.linalg.norm, numpy.eye
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a Greek symbol like :math:`\omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a = [1, 2, 3]
>>> print([x + 3 for x in a])
[4, 5, 6]
>>> print("a\nb")
a
b
"""
# After closing class docstring, there should be one blank line to
# separate following codes (according to PEP257).
# But for function, method and module, there should be no blank lines
# after closing the docstring.
pass
| mit |
gfyoung/pandas | pandas/tests/tslibs/test_conversion.py | 3 | 3973 | from datetime import datetime
import numpy as np
import pytest
from pytz import UTC
from pandas._libs.tslibs import (
OutOfBoundsTimedelta,
conversion,
iNaT,
timezones,
tzconversion,
)
from pandas import Timestamp, date_range
import pandas._testing as tm
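# Helpers: compare vectorized UTC<->local conversions against their pointwise (scalar) equivalents.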
def _compare_utc_to_local(tz_didx):
def f(x):
return tzconversion.tz_convert_from_utc_single(x, tz_didx.tz)
result = tzconversion.tz_convert_from_utc(tz_didx.asi8, tz_didx.tz)
expected = np.vectorize(f)(tz_didx.asi8)
tm.assert_numpy_array_equal(result, expected)
def _compare_local_to_utc(tz_didx, naive_didx):
# Check that tz_localize behaves the same vectorized and pointwise.
err1 = err2 = None
try:
result = tzconversion.tz_localize_to_utc(naive_didx.asi8, tz_didx.tz)
err1 = None
except Exception as err:
err1 = err
try:
expected = naive_didx.map(lambda x: x.tz_localize(tz_didx.tz)).asi8
except Exception as err:
err2 = err
if err1 is not None:
assert type(err1) == type(err2)
else:
assert err2 is None
tm.assert_numpy_array_equal(result, expected)
def test_tz_convert_single_matches_tz_convert_hourly(tz_aware_fixture):
tz = tz_aware_fixture
tz_didx = date_range("2014-03-01", "2015-01-10", freq="H", tz=tz)
naive_didx = date_range("2014-03-01", "2015-01-10", freq="H")
_compare_utc_to_local(tz_didx)
_compare_local_to_utc(tz_didx, naive_didx)
@pytest.mark.parametrize("freq", ["D", "A"])
def test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq):
tz = tz_aware_fixture
tz_didx = date_range("2000-01-01", "2020-01-01", freq=freq, tz=tz)
naive_didx = date_range("2000-01-01", "2020-01-01", freq=freq)
_compare_utc_to_local(tz_didx)
_compare_local_to_utc(tz_didx, naive_didx)
@pytest.mark.parametrize(
"arr",
[
pytest.param(np.array([], dtype=np.int64), id="empty"),
pytest.param(np.array([iNaT], dtype=np.int64), id="all_nat"),
],
)
def test_tz_convert_corner(arr):
result = tzconversion.tz_convert_from_utc(arr, timezones.maybe_get_tz("Asia/Tokyo"))
tm.assert_numpy_array_equal(result, arr)
def test_tz_convert_readonly():
# GH#35530
arr = np.array([0], dtype=np.int64)
arr.setflags(write=False)
result = tzconversion.tz_convert_from_utc(arr, UTC)
tm.assert_numpy_array_equal(result, arr)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("dtype", ["M8[ns]", "M8[s]"])
def test_length_zero_copy(dtype, copy):
arr = np.array([], dtype=dtype)
result = conversion.ensure_datetime64ns(arr, copy=copy)
assert result.base is (None if copy else arr)
def test_ensure_datetime64ns_bigendian():
# GH#29684
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = conversion.ensure_datetime64ns(arr)
expected = np.array([np.datetime64(1, "ms")], dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
def test_ensure_timedelta64ns_overflows():
arr = np.arange(10).astype("m8[Y]") * 100
msg = r"Out of bounds for nanosecond timedelta64\[Y\] 900"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
conversion.ensure_timedelta64ns(arr)
class SubDatetime(datetime):
pass
@pytest.mark.parametrize(
"dt, expected",
[
pytest.param(
Timestamp("2000-01-01"), Timestamp("2000-01-01", tz=UTC), id="timestamp"
),
pytest.param(
datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=UTC), id="datetime"
),
pytest.param(
SubDatetime(2000, 1, 1),
SubDatetime(2000, 1, 1, tzinfo=UTC),
id="subclassed_datetime",
),
],
)
def test_localize_pydatetime_dt_types(dt, expected):
# GH 25851
# ensure that subclassed datetime works with
# localize_pydatetime
result = conversion.localize_pydatetime(dt, UTC)
assert result == expected
| bsd-3-clause |
kaiseu/pat-data-processing | component/mem.py | 1 | 2129 | #!/usr/bin/python
# encoding: utf-8
"""
@author: xuk1
@license: (C) Copyright 2013-2017
@contact: [email protected]
@file: mem.py
@time: 8/15/2017 10:50
@desc:
"""
import numpy as np
import pandas as pd
from component.base import CommonBase
class Mem(CommonBase):
"""
Node memory attribute, phasing memory data from original PAT file
"""
used_col = ['HostName', 'TimeStamp', 'kbmemfree', 'kbmemused', 'kbbuffers', 'kbcached']
converter = {col: np.int64 for col in used_col[2:]}
def __init__(self, file_path):
self.file_path = file_path
def get_data_by_time(self, start, end):
"""
get average value of this attribute and all raw data within the start and end timestamp.
if start and end all equal to [0] will calculate all the data.
:param start: list of start timestamp
:param end: list of end timestamp, should be the same length of start
:return: dict that contains avg value of all the timestamp pair and all raw data
"""
df = pd.read_csv(self.file_path, delim_whitespace=True,
usecols=self.used_col, header=0)
df = df.loc[0::2] # read every two rows
pd.to_datetime(df['TimeStamp'], unit='s')
df = df.set_index('TimeStamp').astype(self.converter)
avg = []
if start[0] == end[0] == 0: # calc all the data
avg.append(df.iloc[:, 2:len(self.used_col)].mean(axis=0).astype('float32'))
if len(start) == 1:
return avg, df
else:
for i in range(1, len(start)): # calc the data within the pair of time period
avg.append(df.loc[str(start[i]): str(end[i])].iloc[:, 2:len(self.used_col)].mean(axis=0))
return avg, df
for i in range(len(start)): # calc the data within the pair of time period
avg.append(df.loc[str(start[i]): str(end[i]), self.used_col[2:]].mean(axis=0).astype('float32'))
return avg, df
def used_col_num(self):
        return len(self.used_col)
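# A minimal usage sketch (not part of the original module); the file path below is hypothetical:
#
#   mem = Mem('pat_result/instruments/node1/memstat')
#   avg, raw = mem.get_data_by_time([0], [0])  # start == end == [0] averages over the whole log
#   print(avg[0])  # mean kbmemfree, kbmemused, kbbuffers, kbcached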
| apache-2.0 |
srm912/servo | tests/heartbeats/process_logs.py | 139 | 16143 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
        # convert time from ns to ms and energy from uJ to J
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
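# A minimal sketch of how the helper above is used; the numbers and the
# _hb_power_demo name are made up for illustration (units are whatever the
# heartbeat logs use), and the computation is elementwise on numpy arrays.
def _hb_power_demo():
    es = np.array([0.0, 1000.0])
    ee = np.array([1000.0, 3000.0])
    ts = np.array([0.0, 2000.0])
    te = np.array([2000.0, 6000.0])
    # (ee - es) / ((te - ts) / 1000.0) -> array([ 500., 500.])
    return hb_energy_times_to_power(es, ee, ts, te)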
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
    Return: (profiler name, [start times], [end times], [start energies], [end energies])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
    log_dir -- the log directory to process - contains subdirectories for each configuration
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
| mpl-2.0 |
sfletc/scram2_plot | scram_plot/profile_plot.py | 1 | 16126 | import numpy
from pylab import * # @UnusedWildImport
import matplotlib.pyplot as plt # @Reimport
import os.path
class DNA(object):
"""
DNA class
"""
dna_alphabet = set("AGCTN")
def __init__(self, sequence):
self.sequence = sequence.upper()
def __len__(self):
return len(self.sequence)
def __getitem__(self, key):
return self.sequence[key]
def __hash__(self):
return hash(self.sequence)
def __repr__(self):
return self.sequence
def __eq__(self, other):
return self.sequence == other.sequence
def profile_plot(nt_list, search_terms, in_files, cutoff, plot_y_lim, win, pub, save_plot, bin_reads):
"""
Profile plot function
:param nt_list: list of read length ints to plot
:param search_terms: header search terms list
:param in_files: alignment files prefix
:param cutoff: highest count of the most abundant alignment of 21,22,24 nt profiles
:param plot_y_lim: set y limits on plot
:param win: smoothing window size
:param pub: remove box and axis labels
"""
select_win = False
alignment_file_list = _alignment_file_list(in_files, nt_list)
substring = " ".join(search_terms)
all_keys = _get_all_headers(alignment_file_list)
for header in all_keys:
if substring.lower() in header.lower():
nt_pos = 0
header_alignment_tuple = ()
ref_len_tuple = ()
#Get alignments for the search key (each nt length)
for alignment_file in alignment_file_list:
header_alignment_tuple, ref_len_tuple = _get_selected_alignments(alignment_file, header,
header_alignment_tuple,
ref_len_tuple, nt_list[nt_pos])
nt_pos+=1
#Check if one total alignment count for the provided lengths is above the cutoff
above_cutoff = False
for alignment in header_alignment_tuple:
if alignment[2] >= cutoff:
above_cutoff = True
if above_cutoff:
#Check header length - truncate for the save file name if too long
_above_cutoff(bin_reads, header, header_alignment_tuple, in_files, nt_list, plot_y_lim, pub,
ref_len_tuple, save_plot, select_win, win)
def _above_cutoff(bin_reads, header, header_alignment_tuple, in_files, nt_list, plot_y_lim, pub, ref_len_tuple,
save_plot, select_win, win):
"""
Plot if above cutoff
:param bin_reads: bool whether to bin reads
:param header: header
:param header_alignment_tuple: header alignment tuple
:param in_files: path/to/file/prefix
:param nt_list: list of read lengths
:param plot_y_lim: y axes limit
    :param pub: bool for whether to remove axes and legend
:param ref_len_tuple: ref len tuple
:param save_plot: bool whether to save plot
    :param select_win: bool whether to auto-select window size
:param win: window size
"""
if header[0] == '"':
plot_name = _save_file_name(in_files, header[1:-2])
else:
plot_name = _save_file_name(in_files, header)
print("Plotting:\n")
print(header)
# Get the ref len
max_ref_len = max(ref_len_tuple)
# Calculate window size
if bin_reads and win == 0:
win = 250
else:
win, select_win = _select_win_size(max_ref_len, select_win, win)
# Convert alignments to y values for plotting (i.e. fill in zeros)
graph_processed_list = []
nt_pos = 0
for alignment in header_alignment_tuple:
if not bin_reads:
graph_processed_list.append(_list_aligned_reads(alignment, max_ref_len, int(nt_list[nt_pos])))
else:
graph_processed_list.append(_bin_aligned_reads(alignment, max_ref_len, int(nt_list[nt_pos])))
nt_pos += 1
# Smooth y-values
plot_data = _smooth_all_plot_data(graph_processed_list, win)
# Plot
_plot_profile_plot(nt_list, graph_processed_list[0][0], plot_data, header, plot_y_lim, pub, save_plot, plot_name,
win)
def _alignment_file_list(in_files, nt_list):
"""
Generate alignment file list
:param in_files: path/to/alignment prefix
:param nt_list: list of read length ints to plot
    :return: list of file paths to load
"""
print("\nLoading scram alignment files:\n")
alignment_file_list = []
for nt in nt_list:
fname = in_files + "_" + nt + ".csv"
if os.path.isfile(fname):
try:
print("{0} \n".format(fname))
in_file, _ = _import_scram_profile(fname)
alignment_file_list.append(in_file)
except:
print("\nCannot load and process {}".format(fname))
sys.exit()
else:
print("\n{} does not exist at this location".format(fname))
sys.exit()
return alignment_file_list
def _import_scram_profile(in_file):
"""
Import a SCRAM csv file to a dictionary
:param in_file: path/to/profile string
:return: alignments dictionary and srna length in the alignment
"""
alignments = {}
srna_len = 0
with open(in_file, 'r') as f:
first_line = True
for line in f:
if first_line:
first_line = False
else:
line = line.strip().rsplit(',', 7)
srna_len = len(line[2])
if line[0] not in alignments:
alignments[line[0]] = [(int(line[1]), DNA(line[2]), int(line[3]), line[4], float(line[5]),
float(line[6]))]
else:
alignments[line[0]].append(
(int(line[1]), DNA(line[2]), int(line[3]), line[4], float(line[5]), float(line[6])))
return alignments, srna_len
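# A minimal sketch of the csv layout the parser above expects (the file name, header
# text and row values below are made up for illustration): one header line is skipped,
# then each row's fields are used as reference header, reference length, read sequence,
# position, strand, count and an error term.
def _import_scram_profile_demo(tmp_path="_demo_scram.csv"):
    with open(tmp_path, "w") as f:
        f.write("header,ref_len,seq,position,strand,count,se\n")
        f.write("chr1,1000,ACGTACGTACGTACGTACGTA,10,+,5.0,0.5\n")
    alignments, srna_len = _import_scram_profile(tmp_path)
    # srna_len == 21 (the read length); alignments["chr1"] holds one tuple per row
    return alignments, srna_len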
def _get_all_headers(alignment_file_list):
"""
Get headers
:param alignment_file_list:
:return: set of headers
"""
print("Extracting headers:\n")
all_keys = set()
for nt in alignment_file_list:
for header in nt.keys():
all_keys.add(header)
return all_keys
def _get_selected_alignments(alignment_file, header, header_alignment_tuple, ref_len_tuple, nt):
"""
Get selected alignments
:param alignment_file: alignment file
:param header: header
:param header_alignment_tuple: header,alignment tuple
:param ref_len_tuple: ref lengths tuple
:param nt: read length
:return: header,alignment tuple and ref lengths tuple
"""
alignment, ref_len = _extract_header_alignment(header, alignment_file, nt)
header_alignment_tuple = header_alignment_tuple + (alignment,)
ref_len_tuple = ref_len_tuple + (ref_len,)
return header_alignment_tuple, ref_len_tuple
def _extract_header_alignment(header, alignments, nt):
"""
    With a provided complete header, extract the alignment and process it to the correct format for filling in zeros
:param header: reference sequence header string
:param alignments: alignments dictionary
:return: sorted_fwd_alignment, sorted_rvs_alignment, aln_count list
"""
sorted_fwd_alignment = []
sorted_rvs_alignment = []
aln_count = 0.0
ref_len = 0
if header in alignments:
extracted_alignments = alignments[header]
for alignment in extracted_alignments:
ref_len = alignment[0]
if alignment[3] =="+":
sorted_fwd_alignment.append((alignment[2], alignment[4], alignment[5]))
elif alignment[3] =="-":
sorted_rvs_alignment.append((alignment[2], -alignment[4], alignment[5]))
aln_count += alignment[4]
return [sorted_fwd_alignment, sorted_rvs_alignment, aln_count], ref_len
def _select_win_size(max_ref_len, select_win, win):
"""
Set smoothing window size
:param max_ref_len: length of reference
:param select_win: True if window size to be selected
:param win: window size
    :return: window size, bool whether to select win
"""
if win == 0 or select_win:
win = int(max_ref_len / 30)
select_win = True
if win % 2 != 0:
win += 1
if win < 6:
win = 1
return win, select_win
def _list_aligned_reads(fwd_rvs_align_list, ref_len, nt):
"""
Generate alignment counts for every nucleotide in the reference
:param fwd_rvs_align_list: list of sorted forwards and reverse alignments
:param ref_len: number of nucleotides in the reference sequence (int)
    :return: reference_x_axis ([0,0,...] (list(int)) - length of the reference sequence,
    upper and lower sense-strand alignment counts (list(float), positive), and
    upper and lower antisense-strand alignment counts (list(float), negative)
"""
sorted_fwd_alignment = fwd_rvs_align_list[0]
sorted_rvs_alignment = fwd_rvs_align_list[1]
fwd_alignment_y_axis_upper = [0] * ref_len
fwd_alignment_y_axis_lower = [0] * ref_len
revs_alignment_y_axis_upper = [0] * ref_len
revs_alignment_y_axis_lower = [0] * ref_len
reference_x_axis = list(range(0, ref_len))
for i in sorted_fwd_alignment:
for j in range(nt):
fwd_alignment_y_axis_upper[i[0]+j-1] += (i[1] + i[2])
fwd_alignment_y_axis_lower[i[0]+j-1] += (i[1] - i[2])
for i in sorted_rvs_alignment:
for j in range(nt):
revs_alignment_y_axis_upper[i[0]+j-1] += (i[1] + i[2])
revs_alignment_y_axis_lower[i[0]+j-1] += (i[1] - i[2])
return reference_x_axis, fwd_alignment_y_axis_upper, fwd_alignment_y_axis_lower, \
revs_alignment_y_axis_upper, revs_alignment_y_axis_lower
def _bin_aligned_reads(fwd_rvs_align_list, ref_len, nt):
"""
Use instead of fill_in_zeros_se for long references (i.e. chromosomes)
:param fwd_rvs_align_list: fwd_rvs_align_list
:param ref_len: length of reference
:param nt: read length aligned
:return: empty ref list of 0s and bin list
"""
bin_list=[10000*[0],10000*[0],10000*[0],10000*[0]]
bin_size = ref_len / 10000
align_count=0
for sorted_alignment in range(2):
for direction in fwd_rvs_align_list[sorted_alignment]:
bin_number=int(direction[0]/bin_size)
bin_list[align_count][bin_number]+=(direction[1] + direction[2])
bin_list[align_count+1][bin_number]+=(direction[1] - direction[2])
align_count = 2
reference_x_axis = list(range(0, 10000))
return [reference_x_axis,]+bin_list
def _smooth_all_plot_data(graph_processed_list, win):
"""
Smooth all plot data
:param graph_processed_list: list of graph_processed
:param win: window size
    :return: smoothed for plot list
"""
smoothed_for_plot_list = []
for graph_processed in graph_processed_list:
single_nt_size_tuple=()
for direction_se in [1,2,3,4]:
single_nt_size_tuple+=(_smooth(numpy.array(graph_processed[direction_se]), win,
window='blackman'),)
smoothed_for_plot_list.append(single_nt_size_tuple)
return smoothed_for_plot_list
def _smooth(x, window_len, window='hamming'):
"""
Smoothing function from scipy cookbook
:param x: list of vals to smooth
:param window_len: window length
:param window: type of smoothing window
:return: list of smoothed vals
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 6:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = numpy.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
if window == 'flat': # moving average
w = numpy.ones(window_len, 'd')
else:
        w = getattr(numpy, window)(window_len)
y = numpy.convolve(w / w.sum(), s, mode='valid')
return y[int(window_len / 2 - 1):-int(window_len / 2)]
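# A minimal sketch of the smoothing helper above on noisy toy data (the _smooth_demo
# name and the signal are made up for illustration). The window length must be even and
# >= 6 to have any effect, matching how _select_win_size chooses it, and the output has
# the same length as the input.
def _smooth_demo():
    noisy = numpy.sin(numpy.linspace(0, 2 * numpy.pi, 200)) + numpy.random.normal(0, 0.1, 200)
    smoothed = _smooth(noisy, 10, window='blackman')
    assert len(smoothed) == len(noisy)
    return smoothed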
def _plot_profile_plot(nt_list, x_ref, smoothed_for_plot_tuple, header, plot_y_lim, pub, save_plot, plot_name, win):
"""
Plot profile plot
:param nt_list: list of read lengths to plot
:param x_ref: x axis reference
:param smoothed_for_plot_tuple: smoothed for plot tuple
:param header: header
:param plot_y_lim: y limits
:param pub: bool to remove axes and legends
:param save_plot: bool to save plot to file
:param plot_name: plot name
:param win: smoothing windows
"""
fig = plt.figure(figsize=(10, 5))
nt_pos = 0
for smoothed_for_plot in smoothed_for_plot_tuple:
plt.plot(x_ref, smoothed_for_plot[0], color=_nt_colour(int(nt_list[nt_pos])), label='{0} nt'.format(nt_list[
nt_pos]),
lw=1, alpha=0.2)
plt.plot(x_ref, smoothed_for_plot[1], color=_nt_colour(int(nt_list[nt_pos])), lw=1, alpha=0.2)
plt.fill_between(x_ref, smoothed_for_plot[0], smoothed_for_plot[1], color=_nt_colour(int(nt_list[nt_pos])),
alpha=0.5)
plt.plot(x_ref, smoothed_for_plot[2], color=_nt_colour(int(nt_list[nt_pos])), lw=1, alpha=0.2)
plt.plot(x_ref, smoothed_for_plot[3], color=_nt_colour(int(nt_list[nt_pos])), lw=1, alpha=0.2)
plt.fill_between(x_ref, smoothed_for_plot[2], smoothed_for_plot[3], color=_nt_colour(int(nt_list[nt_pos])),
alpha=0.5)
nt_pos += 1
axhline(y=0)
if pub:
_pub_plot()
else:
xlabel(header)
if win != 1:
ylabel('Coverage (smoothed RPMR; win = {})'.format(win))
else:
ylabel('Coverage (RPMR)')
plt.legend(loc='best', fancybox=True, framealpha=0.5)
if plot_y_lim != 0:
ylim(-plot_y_lim, plot_y_lim)
if save_plot:
plt.savefig('{0}.png'.format(plot_name), dpi=300)
plt.show()
def _pub_plot():
"""
Remove axis, labels, legend from plot
"""
plt.tick_params(
axis='both', # changes apply to the x-axis
direction='in',
which='both', # both major and minor ticks are affected
bottom=True, # ticks along the bottom edge are off
top=True,
right=True,
left=True, # ticks along the top edge are off
labelbottom=False,
labelleft=False,
labelright=False,
labelsize=15) # labels along the bottom edge are off
_clear_frame()
def _save_file_name(in_files, header):
"""
Construct save file name
:param in_files:
:param header:
:return:
"""
out_file_name = in_files + "_"
for i in header:
if len(out_file_name) > 100:
break
else:
if i == " " or not i.isalnum():
out_file_name += "_"
else:
out_file_name += i
return out_file_name
def _clear_frame(ax=None):
"""
Removes frame for publishing plots
"""
if ax is None:
ax = plt.gca()
ax.xaxis.set_visible(True)
ax.yaxis.set_visible(True)
for spine in ax.spines.values():
spine.set_visible(False)
def _nt_colour(nt):
"""
Set default colours for 21, 22 and 24 nt sRNAs
:param nt: aligned read length (int)
:return: colour code (str)
"""
hex_dict = {18: '#669999', 19: '#33cccc', 20: '#33cccc', 21: '#00CC00',
22: '#FF3399', 23: '#d8d408', 24: '#3333FF', 25: '#cccc00',
26: '#660033', 27: '#996600', 28: '#336699', 29: '#ff6600',
30: '#ff99ff', 31: '#669900', 32: '#993333', "mir": '#ff7b00'}
if nt not in hex_dict:
return "black"
else:
return hex_dict[nt]
| mit |
vanceeasleaf/aces | aces/algorithm/cs.py | 1 | 13800 | import numpy as np
import aces.tools as tl
import h5py
from numpy.linalg import norm
import os
from ase import io
from scipy import optimize
from aces.f import read_forces, writefc2, writefc3, disp2atoms
def shrink(y, a):
return np.sign(y) * np.maximum(np.abs(y) - a, 0.0)
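# A minimal sketch of the soft-thresholding operator above, which is the l1 proximal
# step used by the (split) Bregman iterations below; the values and the _shrink_demo
# name are made up for illustration.
def _shrink_demo():
    y = np.array([-2.0, -0.5, 0.0, 0.3, 1.5])
    # entries within +/-1 are zeroed, the rest are pulled towards zero by 1
    return shrink(y, 1.0)  # array([-1., -0., 0., 0., 0.5])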
def dot(a, b):
return np.tensordot(a, b, axes=([1], [0]))
def maxeig(A):
B = np.zeros_like(A)
    for j in range(A.shape[1]):
B[:, j] = A[:, j] / A.sum(axis=0)[j]
C = B.sum(axis=1)
W = C / C.sum()
    lmax = (A.dot(W) / W).mean()
return lmax
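# A minimal sanity check for the estimate above (assuming, as for the Gram matrices
# A.T.dot(A) used below, that A is square with non-negative entries); the matrix and
# the _maxeig_demo name are made up for illustration.
def _maxeig_demo():
    A = np.array([[2.0, 1.0], [1.0, 3.0]])
    # maxeig gives a rough value (~3.5 here) versus numpy's exact ~3.62
    return maxeig(A), np.linalg.eigvals(A).max()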
class runner:
def __init__(self, NAH=3, split=True, mu=0.1, lam=0.9):
self.NAH = NAH
self.split = split
self.mu = mu
self.lam = lam
# self.db=h5py.File('force.hdf5')
def getForce(self, pos, files):
print("reading vasprun.xml and POSCAR")
u = []
for file in files:
dir = os.path.dirname(file)
atoms = io.read(dir + '/POSCAR')
u.append(atoms.positions - pos)
forces = []
for file in files:
forces.append(read_forces(file))
return np.array(forces), np.array(u)
def getsatoms(self):
filename = 'disp_fc3.yaml'
if (tl.exists(filename)):
return disp2atoms(filename)
filename = 'disp.yaml'
if (tl.exists(filename)):
return disp2atoms(filename)
filename = '3RD.SPOSCAR'
if (tl.exists(filename)):
from ase import io
return io.read(filename, format='vasp')
filename = 'SPOSCAR'
if (tl.exists(filename)):
from ase import io
return io.read(filename, format='vasp')
def getSupercell(self, atoms):
from pyspglib import spglib
s = spglib.get_symmetry(atoms)
symmetry = []
print("building symmetry")
for i, rot in enumerate(s['rotations'][:100]):
print("symetry :", i)
trans = s['translations'][i]
map0 = self.getMap(atoms, rot, trans)
symmetry.append([rot, map0])
return symmetry
def getMap(self, atoms, rot, trans):
v = atoms.copy()
v.positions = v.positions.dot(rot.T)
v.translate(trans.dot(v.cell))
import itertools
from scipy.spatial.distance import cdist
posi = atoms.positions
d2s = np.empty((27, len(v), len(v)))
for j, (ja, jb, jc) in enumerate(
itertools.product(range(-1, 2), range(-1, 2), range(-1, 2))):
posj = v.positions + np.dot([ja, jb, jc], v.cell)
d2s[j, :, :] = cdist(posi, posj, "sqeuclidean")
d2min = d2s.min(axis=0)
map0 = np.argmin(d2min, axis=1)
return map0
def getTrainSets(self, u):
assert len(u) > 0
self.L = len(u)
n = self.natom = len(u[0])
row = 0
rowr = [0]
for i in range(self.NAH):
row += (n * 3)**i
rowr.append(row)
self.rowr = rowr
def getMatrix(self, F, u):
print("getting compressive matrix")
rowr = self.rowr
A = np.zeros([self.L, rowr[-1]])
g = self.mulU
# shape=F.shape
n = self.natom
for j in range(self.L):
for i in range(self.NAH):
r = range(rowr[i], rowr[i + 1])
A[j, r] = -g(u[j].flatten(), i)
F = F.reshape([self.L, 3 * n])
c = 3 * n
F = F.T.flatten().T
A = np.kron(np.eye(c), A)
return F, A
    def gauss(a):
        """Gaussian elimination with partial pivoting (back substitution against a zero rhs)."""
        m, n = a.shape
        b = np.zeros(n)
        for i in range(0, n - 1):
            # partial pivoting: bring the row with the largest |pivot| into position i
            imax = np.abs(a[i:n, i]).argmax() + i
            if imax != i:
                a[[i, imax]] = a[[imax, i]]
            for j in range(i + 1, n):
                if a[j, i] != 0.0 and a[i, i] != 0.0:
                    lam = float(a[j, i]) / a[i, i]
                    a[j] = a[j] - lam * a[i]
        for k in range(n - 1, -1, -1):
            b[k] = (b[k] - np.dot(a[k, (k + 1):], b[(k + 1):])) / a[k, k]
        return b
def mulU(self, x, p):
if p > 0:
return np.kron(self.mulU(x, p - 1), x) / p
else:
return 1.0
def getCsMat(self, F, u, symmetry):
self.getTrainSets(u)
# keep to be the constrain of the newest variables
Q = []
n = u.shape[1]
v = self.rowr[-1]
p = 3 * n
step = 0
nval = p * v
# the connection between oldest variables and newest variables
E = None
for rot, map0 in symmetry:
step += 1
print("step:", step)
for i in range(n):
print("atom:", i)
for j in range(n):
ii = map0[i]
jj = map0[j]
for a in range(3):
for b in range(3):
t = np.zeros(nval)
for r in range(3):
id = (ii * 3 + a) * v + 1 + jj * 3 + r
id1 = (i * 3 + a) * v + 1 + j * 3 + b
if E is None:
t[id] += rot[r, b]
t[id1] -= rot[a, r]
else:
t[id] += E[id] * rot[r, b]
t[id1] -= E[id1] * rot[a, r]
# phi[ii,jj].dot(rot)=rot.dot(phi[i,j])
Q.append(t)
if (len(Q) == 50):
e, c = np.linalg.eig(Q)
if E is None:
E = c
else:
E = E.dot(c)
nval = E.shape[1]
print("nval:", nval)
Q = []
self.R = E
v = norm(u, axis=2)
u0 = v.flatten().max()
F, A = self.getMatrix(F, u / u0)
return F, A.dot(self.R)
def run(self):
atoms = self.getsatoms()
symmetry = self.getSupercell(atoms)
files = tl.shell_exec(
'find dirs/dir_* -name vasprun.xml |sort -n').split('\n')
if len(files) > 100:
files = files[:100]
pos = atoms.positions
f, u = self.getForce(pos, files)
F, A = self.getCsMat(f, u, symmetry)
print("start compressive sensing ")
B = cs(mu=self.mu, split=self.split, lam=self.lam).run(F, A)
print("rebuilding IFCs ")
phi = self.rebuild(B)
print("writing IFCs ")
v = norm(u, axis=2)
u0 = v.flatten().max()
fc2 = np.einsum(phi[1], [1, 0, 3, 2]) / u0
writefc2(fc2, 'csfc2')
if self.NAH >= 3:
a = h5py.File('fc3p.hdf5')
if 'fc3' in a:
del a['fc3']
a['fc3'] = phi[2] / u0 / u0
a.close()
self.fc3()
def fc3(self):
print("writing csfc3 ")
a = h5py.File('fc3p.hdf5')
fc3 = np.einsum(a['fc3'], [0, 2, 1, 3, 5, 4])
from ase import io
atoms = io.read('POSCAR')
satoms = self.getsatoms()
writefc3(fc3, atoms, satoms, 'csfc3')
def rebuild(self, B):
n = self.natom
rowr = self.rowr
B = self.R.dot(B).T.reshape([-1, 3 * n]).T
phi = []
for i in range(self.NAH):
r = range(rowr[i], rowr[i + 1])
x = B[r].reshape([n, 3] * (i + 1))
idx = np.array([0, i + 1])
rdx = []
for j in range(i):
rdx.extend(idx + (j + 1))
rdx.extend(idx)
x = np.einsum(x, rdx)
phi.append(x)
return phi
class cssklearn:
def __init__(self):
pass
def initu(self, f, A):
dim = list(f.shape)
dim[0] = A.shape[1]
# so dim is the shape of u
return np.ones(dim)
def run(self, f, A):
# from sklearn import cross_validation
from sklearn import linear_model
reg = linear_model.Lasso(
alpha=1e-15, fit_intercept=False, max_iter=10000, tol=1e-5)
print(reg.fit([[0, 0, 2], [1, 1, 2]], [[0, 1], [1, 1]]).coef_.T)
print(A.shape, f.shape)
return reg.fit(A, f).coef_.T
# k_fold = cross_validation.KFold(n=len(f), n_folds=10)
# [svc.fit(X_digits[train], y_digits[train])\
# .score(X_digits[test], y_digits[test]) for train, test in kfold]
class csfortran:
def __init__(self):
pass
def initu(self, f, A):
dim = list(f.shape)
dim[0] = A.shape[1]
# so dim is the shape of u
return np.ones(dim)
def run(self, f, A):
u = self.initu(f, A)
import sys
sys.path.append("/home/xggong/home1/zhouy/soft/bcs-master/wrap")
import bcs as p
ebars = np.zeros(len(A[0]))
sigma2 = np.std(f) / 100.
p.bcs.do_wrapped(A, f, sigma2, 1e-8, u, ebars)
return u
class cs:
def __init__(self, mu=0.7, split=True, lam=0.9):
self.mu, self.lam = mu, lam
self.split = split
def initu(self, f, A):
dim = list(f.shape)
dim[0] = A.shape[1]
# so dim is the shape of u
return np.ones(dim)
def testcs(self):
f = (np.ones(1) * 20.0).reshape(1, 1)
A = np.array([7.0, 10.0]).reshape(1, 2)
print(self.run(f, A))
def test2(self):
f = np.array([7.0, 8.0])
A = np.array([[1.0, 0], [1.0, 0]])
print(self.run(f, A))
def test3(self):
f = np.array([7.0, 8.0])
A = np.array([[1.0, 0, 0], [1.0, 0, 0]])
print(self.run(f, A))
def test4(self):
f = np.array([7.0, 8.0]).reshape(1, 2)
A = np.array([[1.0, 0]])
print(self.run(f, A))
def run(self, f, A):
# normalize
print("normalizing sensing matrix")
# from scipy.sparse.linalg import eigsh
"""
aA=eigsh(A.T.dot(A),k=6)[0].max()#the largest eigenvalue
f/=np.sqrt(aA)
# print norm(f)
A/=np.sqrt(aA)
# print norm(A)
"""
aA = np.double(A.shape[0]**A.max().max())
# maxeig(A.T.dot(A))
f /= np.sqrt(aA)
A /= np.sqrt(aA)
"""
v=np.eye(len(A.T))-A.T.dot(A)
for i in range(20):
v=v.dot(v)
print norm(v)
"""
if self.split:
return self.split_bregman(f, A)
else:
return self.bregman(f, A)
def split_bregman(self, f, A):
def cc(u1):
print("CG error:", norm(u1 - self.bb.flatten()) / norm(self.bb))
tt1 = g(u1, A, f, lam, d, mu, b)
print("CG target:", (tt1 - self.tt) / self.tt)
self.tt = tt1
self.bb = u1
def g(u, *args):
A, f, lam, d, mu, b = args
u = u.reshape(shape)
return 1.0 / 2 * norm(np.dot(A, u) - f)**2 + \
lam / 2.0 * norm(d - mu * u - b)**2
def dg(u, *args):
A, f, lam, d, mu, b = args
u = u.reshape(shape)
return (A.T.dot(A.dot(u) - f) - lam * mu *
(d - b - mu * u)).flatten()
u = self.initu(f, A)
shape = u.shape
d = np.zeros_like(u)
b = np.zeros_like(u)
deta = 0.001
erru = 1.0
lam = self.lam
t = 1.0
tt = 1.0
self.tt = tt
mu = self.mu
scale = 1.0 / np.amax(np.abs(f)) * 1000.0
print("scale=" + str(scale))
f0 = np.zeros_like(f)
self.bb = np.zeros_like(u)
# f*=scale
print('dimmensions:', A.shape, u.shape)
while erru > deta:
# g=lambda u:1.0/2*norm(dot(A,u.reshape(shape))-f)**2\
# +lam/2.0*norm(d-mu*u.reshape(shape)-b)**2
f1 = (f * scale - dot(A, u)) + (f0 + dot(A, u)) / 2
u1 = optimize.fmin_cg(
g,
u,
args=(A, f1, lam, d, mu, b),
disp=True,
fprime=dg,
callback=cc,
gtol=deta * 10).reshape(shape)
d1 = shrink(mu * u1 + b, 1.0 / lam)
b1 = b + mu * u1 - d1
erru = norm(u1 - u) / norm(u)
print('split bregman iteration error:', erru)
b = b1
u = u1
d = d1
f0 = f1
t1 = norm(d, 1) + tt
print('change of target func:', (t1 - t) / t)
t = t1
return u / scale
def bregman(self, f, A):
u = self.initu(f, A)
f0 = np.zeros_like(f)
deta = 0.0001
erru = 1
scale = 1000.0
while erru > deta:
f1 = f * scale + f0 - dot(A, u)
u1 = self.FCP(f1, A, u)
erru = norm(u1 - u) / norm(u)
print('bregman iteration error:', erru)
u = u1
f0 = f1
return u / scale
def FCP(self, f, A, u=None):
if u is None:
u = self.initu(f, A)
m, n = A.shape
if len(f.shape) > 1:
n *= list(f.shape)[1]
ta = 1.99999 # min(1.999,max(1.1,-1.665*np.float(m)/n+2.665))
mu = self.mu
deta = 0.01
# errg=1
erru = 1
while erru > deta: # or errg>deta:
p = np.dot(A, u) - f
g = np.dot(A.T, p)
u1 = shrink(u - ta * g, mu * ta)
# errg=1.0/mu*norm(g,np.inf)-1
erru = norm(u1 - u) / norm(u)
print('FCP iteration :', erru)
u = u1
return u
| gpl-2.0 |
agartland/cycluster | clustering.py | 1 | 12300 |
import scipy.cluster.hierarchy as sch
from scipy.spatial import distance
from bootstrap_cluster import bootstrapFeatures, bootstrapObservations
import numpy as np
import pandas as pd
from functools import partial
from .comparison import _alignClusterMats, alignClusters
from .preprocessing import partialCorrNormalize
from copy import deepcopy
from corrplots import partialcorr
import statsmodels.api as sm
__all__ = ['hierClusterFunc',
'corrDmatFunc',
'makeModuleVariables',
'formReliableClusters',
'labels2modules',
'cyclusterClass',
'meanCorr',
'silhouette']
def corrDmatFunc(cyDf, metric='pearson-signed', dfunc=None, minN=None):
if metric is None:
metric = 'pearson-signed'
if dfunc is None:
if metric in ['spearman', 'pearson']:
"""Anti-correlations are also considered as high similarity and will cluster together"""
dmat = (1 - np.abs(cyDf.corr(method=metric, min_periods=minN).values))
dmat[np.isnan(dmat)] = 1
elif metric in ['spearman-signed', 'pearson-signed']:
"""Anti-correlations are considered as dissimilar and will NOT cluster together"""
dmat = ((1 - cyDf.corr(method = metric.replace('-signed', ''), min_periods = minN).values) / 2)
dmat[np.isnan(dmat)] = 1
else:
raise NameError('metric name not recognized')
else:
ncols = cyDf.shape[1]
dmat = np.zeros((ncols, ncols))
for i in range(ncols):
for j in range(ncols):
"""Assume distance is symetric"""
if i <= j:
tmpdf = cyDf.iloc[:, [i, j]]
tmpdf = tmpdf.dropna()
if tmpdf.shape[0] >= minN:
d = dfunc(cyDf.iloc[:, i], cyDf.iloc[:, j])
else:
d = np.nan
dmat[i, j] = d
dmat[j, i] = d
return pd.DataFrame(dmat, columns = cyDf.columns, index = cyDf.columns)
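# A minimal sketch contrasting the two built-in metrics above on toy data (the analyte
# names and values are made up for illustration): two perfectly anti-correlated columns
# get distance 0 under "pearson" (they would cluster together) but the maximal
# distance 1 under "pearson-signed".
def _corr_dmat_demo():
    df = pd.DataFrame({'A': [1., 2., 3., 4.], 'B': [4., 3., 2., 1.]})
    d_unsigned = corrDmatFunc(df, metric='pearson').loc['A', 'B']       # ~0.0
    d_signed = corrDmatFunc(df, metric='pearson-signed').loc['A', 'B']  # ~1.0
    return d_unsigned, d_signed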
def hierClusterFunc(dmatDf, K=6, method='complete', returnLinkageMat=False, old=False):
if not old:
if dmatDf.shape[0] == dmatDf.shape[1]:
#compressedDmat = dmat.values[np.triu_indices_from(dmat.values)].ravel()
compressedDmat = distance.squareform(dmatDf.values)
else:
raise
else:
compressedDmat = dmatDf.values
hclusters = sch.linkage(compressedDmat, method=method)
labelsVec = sch.fcluster(hclusters, K, criterion='maxclust')
labels = pd.Series(labelsVec, index=dmatDf.columns)
if not returnLinkageMat:
return labels
else:
return labels, hclusters
def formReliableClusters(cyDf, dmatFunc, clusterFunc, bootstraps=500, threshold=0.5):
"""Use bootstrap_clustering to determine the reliable clusters"""
clusters = {}
dmatDf = dmatFunc(cyDf)
#pwrel, labels = bootstrapFeatures(dmat, clusterFunc, bootstraps = bootstraps)
pwrelDf, labels = bootstrapObservations(cyDf, dmatFunc, clusterFunc, bootstraps = bootstraps)
dropped = pd.Series(np.zeros(cyDf.shape[1]).astype(bool), index = cyDf.columns)
for currLab in labels.unique():
cyMembers = labels.index[labels == currLab].tolist()
"""Step-down: start with all members and discard fringe"""
for cy in cyMembers:
meanReliability = (1 - pwrelDf[cy].loc[cyMembers].drop(cy).mean())
if meanReliability < threshold:
dropped[cy] = True
strTuple = (cy, cyDf.sampleStr, 'N' if cyDf.normed else '', currLab, 100 * meanReliability)
print('Excluded %s from cluster %s %sM%s: mean reliability was %1.1f%%' % strTuple)
"""Consider step-up strategy: start with best and add those that fit"""
return pwrelDf, labels, dropped
def labels2modules(labels, dropped = None):
uLabels = np.unique(labels)
out = {lab:labels.index[labels == lab].tolist() for lab in uLabels}
if not dropped is None:
todrop = dropped.index[dropped].tolist()
for lab in list(out.keys()):
out[lab] = [cy for cy in out[lab] if not cy in todrop]
if len(out[lab]) == 0:
_ = out.pop(lab)
return out
def makeModuleVariables(cyDf, labels, sampleStr='M', dropped=None):
"""Define variable for each module by standardizing all the cytokines in the
module and taking the mean. Can be applied to a stacked df with multiple timepoints.
Standardization will be performed across all data.
Each module is also standardized.
Parameters
----------
cyDf : pd.DataFrame [n x cytokines]
Contains columns for making the module.
May include additional columns than included in labels or dropped.
labels : pd.Series [index: cytokines]
Series indicating cluster labels with index containing cytokine vars in cyDf
dropped : pd.Series [index: cytokines]
Series indicating if a cytokine (index) should be dropped when making the module
Returns
-------
out : pd.DataFrame [n x modules]
Modules as columns, one row for every row in cyDf"""
if dropped is None:
dropped = pd.Series(np.zeros((labels.shape[0]), dtype = bool), index = labels.index)
standardizeFunc = lambda col: (col - np.nanmean(col))/np.nanstd(col)
out = None
uLabels = np.unique(labels)
for lab in uLabels:
members = labels.index[(labels == lab) & (~dropped)]
tmpS = cyDf.loc[:, members].apply(standardizeFunc, raw = True).mean(axis = 1, skipna=True)
tmpS.name = '%s%s' % (sampleStr, lab)
if out is None:
out = pd.DataFrame(tmpS)
else:
out = out.join(tmpS)
out = out.apply(standardizeFunc)
return out
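# A minimal sketch of module construction on toy data (the analyte names and values are
# made up for illustration): the two perfectly correlated analytes labelled 1 are
# standardized and averaged into a single "M1" column, and the remaining analyte becomes "M2".
def _make_module_variables_demo():
    cyDf = pd.DataFrame({'IL2': [1., 2., 3.], 'IL4': [2., 4., 6.], 'IFNg': [5., 1., 3.]})
    labels = pd.Series([1, 1, 2], index=['IL2', 'IL4', 'IFNg'])
    modDf = makeModuleVariables(cyDf, labels)
    return modDf.columns.tolist()  # ['M1', 'M2']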
def meanCorr(cyDf, meanVar, cyList=None, method='pearson'):
"""Each cytokine's correlation with the mean."""
if cyList is None:
cyList = np.array([c for c in cyDf.columns if not c == meanVar])
cyList = np.asarray(cyList)
tmpCorr = np.zeros((len(cyList), 3))
for i, s in enumerate(cyList):
tmpCorr[i, :2] = partialcorr(cyDf[s], cyDf[meanVar], method=method)
sorti = np.argsort(tmpCorr[:, 0])
tmpCorr = tmpCorr[sorti,:]
_, tmpCorr[:, 2], _, _ = sm.stats.multipletests(tmpCorr[:, 1], alpha=0.2, method='fdr_bh')
return pd.DataFrame(tmpCorr, index=cyList[sorti], columns=['rho', 'pvalue', 'qvalue'])
def silhouette(dmatDf, labels):
"""Compute the silhouette of every analyte."""
def oneSilhouette(cy):
modInd = labels == labels[cy]
a = dmatDf.loc[cy, modInd].sum()
b = None
for lab in labels.unique():
if not lab == labels[cy]:
tmp = dmatDf.loc[cy, labels==lab].sum()
if b is None or tmp < b:
b = tmp
s = (b - a)/max(b, a)
return s
return labels.index.map(oneSilhouette)
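# A minimal sketch of the per-analyte silhouette above on a toy distance matrix (the
# names and distances are made up for illustration): analyte 'a' is close to its own
# module {a, b} and far from module {c}, so its score is close to 1.
def _silhouette_demo():
    dmatDf = pd.DataFrame([[0.0, 0.1, 0.9], [0.1, 0.0, 0.8], [0.9, 0.8, 0.0]],
                          index=list('abc'), columns=list('abc'))
    labels = pd.Series([1, 1, 2], index=list('abc'))
    return silhouette(dmatDf, labels)  # roughly (0.89, 0.88, 1.0) for a, b, c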
class cyclusterClass(object):
def __init__(self, studyStr, sampleStr, normed, rCyDf, compCommVars=None):
self.studyStr = studyStr
self.sampleStr = sampleStr
self.normed = normed
self.cyVars = rCyDf.columns.tolist()
self.rCyDf = rCyDf.copy()
self.nCyDf, self.normModels = partialCorrNormalize(rCyDf, compCommVars=compCommVars, meanVar='Mean')
self.meanS = self.nCyDf['Mean']
self.nCyDf = self.nCyDf[self.cyVars]
if normed:
self.cyDf = self.nCyDf
else:
self.cyDf = self.rCyDf
self.cyDf.sampleStr = sampleStr
self.cyDf.normed = normed
def applyModules(self, target):
"""Use modules from target for computing module values.
Parameters
----------
target : cyclusterClass"""
self.pwrel = target.pwrel
self.Z = target.Z
self.dmatDf = target.dmatDf
self.labels = target.labels
self.dropped = target.dropped
self.sampleStr = target.sampleStr
self.modS = labels2modules(self.labels, dropped=self.dropped)
self.modDf = makeModuleVariables(self.cyDf, self.labels, sampleStr=self.sampleStr, dropped=self.dropped)
if self.normed:
self.rModDf = makeModuleVariables(self.rCyDf, self.labels, sampleStr=self.sampleStr, dropped=self.dropped)
else:
self.rModDf = self.modDf
def clusterCytokines(self, K=6, alignLabels=None, labelMap=None, metric=None, minN=None):
corrFunc = partial(corrDmatFunc, metric=metric, minN=minN)
self.pwrel, self.labels, self.dropped = formReliableClusters(self.cyDf, corrFunc, partial(hierClusterFunc, K=K), threshold=0)
if not labelMap is None:
self.labels = self.labels.map(labelMap)
if not alignLabels is None:
self.labels = alignClusters(alignLabels, self.labels)
self.modS = labels2modules(self.labels, dropped=self.dropped)
self.modDf = makeModuleVariables(self.cyDf, self.labels, sampleStr=self.sampleStr, dropped=self.dropped)
if self.normed:
self.rModDf = makeModuleVariables(self.rCyDf, self.labels, sampleStr=self.sampleStr, dropped=self.dropped)
else:
self.rModDf = self.modDf
_, self.Z = hierClusterFunc(self.pwrel, returnLinkageMat=True)
self.dmatDf = corrDmatFunc(self.cyDf)
def printModules(self, modules=None):
tmp = labels2modules(self.labels, dropped=None)
for m in list(tmp.keys()):
mStr = '%s%d' % (self.sampleStr, m)
if modules is None or mStr == modules or mStr in modules:
print(mStr)
for c in sorted(tmp[m]):
if self.dropped[c]:
print('*', end=' ')
print(c)
print()
def modMembers(self, modStr):
return self.modS[int(modStr[-1])]
def meanICD(self, dmat='dmat', dropped=None):
"""Compute mean intra-cluster distance using either dmatDf or pwrel"""
def _micd(df, labels):
"""Should this be weighted by the size of each cluster? Yes."""
count = 0
tot = 0
for lab in np.unique(labels):
members = labels.index[labels == lab]
tmp = df[members].loc[members].values.flatten()
count += len(tmp)
tot += tmp.sum()
return tot/count
if dropped is None:
            tmpLabels = self.labels
        else:
            tmpLabels = self.labels.loc[~self.dropped]
        if dmat == 'dmat':
            return _micd(self.dmatDf, tmpLabels)
        elif dmat == 'pwrel':
            return _micd(self.pwrel, tmpLabels)
else:
raise IndexError('Value for dmat not understood (%s)' % dmat)
def pwrelStats(self):
"""Return the mean and standard deviation of values from self.pwrel
for all non-identical cytokines. This is representative of
how reliable the clusters are overall. Returns mean of (1 - pwrel)"""
vec = 1 - self.pwrel.values[np.triu_indices_from(self.pwrel, k=1)].ravel()
return vec.mean(), vec.std()
def randCycluster(self):
"""Return a copy of self with shuffled rows, destroying covariation
among cytokines. Requires that each column be shuffled, independently."""
out = deepcopy(self)
N = out.rCyDf.shape[0]
for cy in out.cyVars:
vals = out.rCyDf[cy].values
nonnanInd = ~np.isnan(vals)
nonnan = vals[nonnanInd]
rind = np.random.permutation(nonnan.shape[0])
nonnan = nonnan[rind]
vals[nonnanInd] = nonnan
out.rCyDf.loc[:, cy] = vals
vals = out.nCyDf[cy].values
nonnan = vals[nonnanInd]
nonnan = nonnan[rind]
vals[nonnanInd] = nonnan
out.nCyDf.loc[:, cy] = vals
return out
@property
def name(self):
return '%s_%s_%s_' % (self.studyStr, self.sampleStr, 'normed' if self.normed else 'raw')
@property
def withMean(self):
return self.cyDf.join(self.meanS)
@property
def modWithMean(self):
return self.modDf.join(self.meanS)
| mit |
great-expectations/great_expectations | great_expectations/dataset/dataset.py | 1 | 198226 | import inspect
import logging
from datetime import datetime
from functools import lru_cache, wraps
from itertools import zip_longest
from numbers import Number
from typing import Any, List, Optional, Set, Union
import numpy as np
import pandas as pd
from dateutil.parser import parse
from scipy import stats
from great_expectations.data_asset.data_asset import DataAsset
from great_expectations.data_asset.util import DocInherit, parse_result_format
from great_expectations.dataset.util import (
build_categorical_partition_object,
build_continuous_partition_object,
is_valid_categorical_partition_object,
is_valid_partition_object,
)
logger = logging.getLogger(__name__)
try:
from sqlalchemy.sql import quoted_name
except:
logger.debug(
"Unable to load quoted name from SqlAlchemy; install optional sqlalchemy dependency for support"
)
quoted_name = None
class MetaDataset(DataAsset):
"""
Holds expectation decorators.
"""
@classmethod
def column_map_expectation(cls, func):
"""Constructs an expectation using column-map semantics.
The column_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating
truthiness of some condition on a per-row basis.
Args:
func (function): \
The function implementing a row-wise expectation. The function should take a column of data and \
return an equally-long column of boolean values corresponding to the truthiness of the \
underlying expectation.
Notes:
column_map_expectation intercepts and takes action based on the following parameters:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
column_map_expectation *excludes null values* from being passed to the function
Depending on the `result_format` selected, column_map_expectation can additional data to a return object, \
including `element_count`, `nonnull_values`, `nonnull_count`, `success_count`, `unexpected_list`, and \
`unexpected_index_list`. \
See :func:`_format_map_output <great_expectations.data_asset.dataset.Dataset._format_map_output>`
See also:
:func:`expect_column_values_to_be_in_set \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_set>` \
for an example of a column_map_expectation
"""
raise NotImplementedError
@classmethod
def column_aggregate_expectation(cls, func):
"""Constructs an expectation using column-aggregate semantics.
The column_aggregate_expectation decorator handles boilerplate issues surrounding the common pattern of \
evaluating truthiness of some condition on an aggregated-column basis.
Args:
func (function): \
The function implementing an expectation using an aggregate property of a column. \
The function should take a column of data and return the aggregate value it computes.
Notes:
column_aggregate_expectation *excludes null values* from being passed to the function
See also:
:func:`expect_column_mean_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_mean_to_be_between>` \
for an example of a column_aggregate_expectation
"""
argspec = inspect.getfullargspec(func)[0][1:]
@cls.expectation(argspec)
@wraps(func)
def inner_wrapper(
self,
column=None,
result_format=None,
row_condition=None,
condition_parser=None,
*args,
**kwargs
):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
# Retain support for string-only output formats:
result_format = parse_result_format(result_format)
if row_condition and self._supports_row_condition:
self = self.query(row_condition, parser=condition_parser).reset_index(
drop=True
)
element_count = self.get_row_count()
if kwargs.get("column"):
column = kwargs.get("column")
if column is not None:
# We test whether the dataset is a sqlalchemy_dataset by seeing if it has an engine. We don't test
# whether it is actually an instance to avoid circular dependency issues.
if (
hasattr(self, "engine")
and self.batch_kwargs.get("use_quoted_name")
and quoted_name
):
column = quoted_name(column, quote=True)
nonnull_count = self.get_column_nonnull_count(
kwargs.get("column", column)
)
# column is treated specially as a positional argument in most expectations
args = tuple((column, *args))
elif kwargs.get("column_A") and kwargs.get("column_B"):
try:
nonnull_count = (
self[kwargs.get("column_A")].notnull()
& self[kwargs.get("column_B")].notnull()
).sum()
except TypeError:
nonnull_count = None
else:
raise ValueError(
"The column_aggregate_expectation wrapper requires either column or "
"both column_A and column_B as input."
)
if nonnull_count:
null_count = element_count - nonnull_count
else:
null_count = None
evaluation_result = func(self, *args, **kwargs)
if "success" not in evaluation_result:
raise ValueError(
"Column aggregate expectation failed to return required information: success"
)
if ("result" not in evaluation_result) or (
"observed_value" not in evaluation_result["result"]
):
raise ValueError(
"Column aggregate expectation failed to return required information: observed_value"
)
return_obj = {"success": bool(evaluation_result["success"])}
if result_format["result_format"] == "BOOLEAN_ONLY":
return return_obj
return_obj["result"] = {
"observed_value": evaluation_result["result"]["observed_value"],
"element_count": element_count,
}
if null_count:
return_obj["result"]["missing_count"] = null_count
if element_count > 0:
return_obj["result"]["missing_percent"] = (
null_count * 100.0 / element_count
)
else:
return_obj["result"]["missing_percent"] = None
else:
return_obj["result"]["missing_count"] = None
return_obj["result"]["missing_percent"] = None
if result_format["result_format"] == "BASIC":
return return_obj
if "details" in evaluation_result["result"]:
return_obj["result"]["details"] = evaluation_result["result"]["details"]
if result_format["result_format"] in ["SUMMARY", "COMPLETE"]:
return return_obj
raise ValueError(
"Unknown result_format %s." % result_format["result_format"]
)
return inner_wrapper
# noinspection PyIncorrectDocstring
class Dataset(MetaDataset):
# This should in general only be changed when a subclass *adds expectations* or *changes expectation semantics*
# That way, multiple backends can implement the same data_asset_type
_data_asset_type = "Dataset"
_supports_row_condition = False
# getter functions with hashable arguments - can be cached
hashable_getters = [
"get_column_min",
"get_column_max",
"get_column_mean",
"get_column_modes",
"get_column_median",
"get_column_quantiles",
"get_column_nonnull_count",
"get_column_stdev",
"get_column_sum",
"get_column_unique_count",
"get_column_value_counts",
"get_row_count",
"get_column_count",
"get_table_columns",
"get_column_count_in_range",
]
def __init__(self, *args, **kwargs):
# NOTE: using caching makes the strong assumption that the user will not modify the core data store
# (e.g. self.spark_df) over the lifetime of the dataset instance
self.caching = kwargs.pop("caching", True)
super().__init__(*args, **kwargs)
if self.caching:
for func in self.hashable_getters:
caching_func = lru_cache(maxsize=None)(getattr(self, func))
setattr(self, func, caching_func)
@classmethod
def from_dataset(cls, dataset=None):
"""This base implementation naively passes arguments on to the real constructor, which
is suitable really when a constructor knows to take its own type. In general, this should be overridden"""
return cls(dataset)
def get_row_count(self):
"""Returns: int, table row count"""
raise NotImplementedError
def get_column_count(self):
"""Returns: int, table column count"""
raise NotImplementedError
def get_table_columns(self) -> List[str]:
"""Returns: List[str], list of column names"""
raise NotImplementedError
def get_column_nonnull_count(self, column):
"""Returns: int"""
raise NotImplementedError
def get_column_mean(self, column):
"""Returns: float"""
raise NotImplementedError
def get_column_value_counts(self, column, sort="value", collate=None):
"""Get a series containing the frequency counts of unique values from the named column.
Args:
column: the column for which to obtain value_counts
sort (string): must be one of "value", "count", or "none".
- if "value" then values in the resulting partition object will be sorted lexigraphically
- if "count" then values will be sorted according to descending count (frequency)
- if "none" then values will not be sorted
collate (string): the collate (sort) method to be used on supported backends (SqlAlchemy only)
Returns:
pd.Series of value counts for a column, sorted according to the value requested in sort
"""
raise NotImplementedError
def get_column_sum(self, column):
"""Returns: float"""
raise NotImplementedError
def get_column_max(self, column, parse_strings_as_datetimes=False):
"""Returns: Any"""
raise NotImplementedError
def get_column_min(self, column, parse_strings_as_datetimes=False):
"""Returns: Any"""
raise NotImplementedError
def get_column_unique_count(self, column):
"""Returns: int"""
raise NotImplementedError
def get_column_modes(self, column):
"""Returns: List[Any], list of modes (ties OK)"""
raise NotImplementedError
def get_column_median(self, column):
"""Returns: Any"""
raise NotImplementedError
def get_column_quantiles(
self, column, quantiles, allow_relative_error=False
) -> List[Any]:
"""Get the values in column closest to the requested quantiles
Args:
column (string): name of column
quantiles (tuple of float): the quantiles to return. quantiles \
*must* be a tuple to ensure caching is possible
Returns:
List[Any]: the nearest values in the dataset to those quantiles
"""
raise NotImplementedError
def get_column_stdev(self, column):
"""Returns: float"""
raise NotImplementedError
def get_column_partition(
self, column, bins="uniform", n_bins=10, allow_relative_error=False
):
"""Get a partition of the range of values in the specified column.
Args:
column: the name of the column
bins: 'uniform' for evenly spaced bins or 'quantile' for bins spaced according to quantiles
n_bins: the number of bins to produce
allow_relative_error: passed to get_column_quantiles, set to False for only precise
values, True to allow approximate values on systems with only binary choice (e.g. Redshift), and to a
value between zero and one for systems that allow specification of relative error (e.g.
SparkDFDataset).
Returns:
A list of bins
"""
if bins == "uniform":
# TODO: in the event that we shift the compute model for
# min and max to have a single pass, use that instead of
# quantiles for clarity
# min_ = self.get_column_min(column)
# max_ = self.get_column_max(column)
min_, max_ = self.get_column_quantiles(
column, (0.0, 1.0), allow_relative_error=allow_relative_error
)
# PRECISION NOTE: some implementations of quantiles could produce
# varying levels of precision (e.g. a NUMERIC column producing
# Decimal from a SQLAlchemy source, so we cast to float for numpy)
bins = np.linspace(start=float(min_), stop=float(max_), num=n_bins + 1)
elif bins in ["ntile", "quantile", "percentile"]:
bins = self.get_column_quantiles(
column,
tuple(np.linspace(start=0, stop=1, num=n_bins + 1)),
allow_relative_error=allow_relative_error,
)
elif bins == "auto":
# Use the method from numpy histogram_bin_edges
nonnull_count = self.get_column_nonnull_count(column)
sturges = np.log2(nonnull_count + 1)
min_, _25, _75, max_ = self.get_column_quantiles(
column,
(0.0, 0.25, 0.75, 1.0),
allow_relative_error=allow_relative_error,
)
iqr = _75 - _25
if iqr < 1e-10: # Consider IQR 0 and do not use variance-based estimator
n_bins = sturges
else:
fd = (2 * float(iqr)) / (nonnull_count ** (1 / 3))
n_bins = max(
int(np.ceil(sturges)), int(np.ceil(float(max_ - min_) / fd))
)
bins = np.linspace(start=float(min_), stop=float(max_), num=n_bins + 1)
else:
raise ValueError("Invalid parameter for bins argument")
return bins
def get_column_hist(self, column, bins):
"""Get a histogram of column values
Args:
column: the column for which to generate the histogram
bins (tuple): the bins to slice the histogram. bins *must* be a tuple to ensure caching is possible
Returns: List[int], a list of counts corresponding to bins"""
raise NotImplementedError
def get_column_count_in_range(
self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
):
"""Returns: int"""
raise NotImplementedError
def get_crosstab(
self,
column_A,
column_B,
bins_A=None,
bins_B=None,
n_bins_A=None,
n_bins_B=None,
):
"""Get crosstab of column_A and column_B, binning values if necessary"""
raise NotImplementedError
def test_column_map_expectation_function(self, function, *args, **kwargs):
"""Test a column map expectation function
Args:
function (func): The function to be tested. (Must be a valid column_map_expectation function.)
            *args : Positional arguments to be passed to the function
            **kwargs : Keyword arguments to be passed to the function
Returns:
An ExpectationSuiteValidationResult
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you'll still need to \
define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
"""
new_function = self.column_map_expectation(function)
return new_function(self, *args, **kwargs)
def test_column_aggregate_expectation_function(self, function, *args, **kwargs):
"""Test a column aggregate expectation function
Args:
function (func): The function to be tested. (Must be a valid column_aggregate_expectation function.)
            *args : Positional arguments to be passed to the function
            **kwargs : Keyword arguments to be passed to the function
Returns:
An ExpectationSuiteValidationResult
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you'll still need to \
define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
"""
new_function = self.column_aggregate_expectation(function)
return new_function(self, *args, **kwargs)
#####
#
# Table shape expectations
#
#####
@DocInherit
@DataAsset.expectation(["column"])
def expect_column_to_exist(
self,
column,
column_index=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the specified column to exist.
expect_column_to_exist is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column (str): \
The column name.
Other Parameters:
column_index (int or None): \
If not None, checks the order of the columns. The expectation will fail if the \
column is not in location column_index (zero-indexed).
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
columns = self.get_table_columns()
if column in columns:
return {
# FIXME: list.index does not check for duplicate values.
"success": (column_index is None)
or (columns.index(column) == column_index)
}
else:
return {"success": False}
@DocInherit
@DataAsset.expectation(["column_list"])
def expect_table_columns_to_match_ordered_list(
self,
column_list,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the columns to exactly match a specified list.
expect_table_columns_to_match_ordered_list is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column_list (list of str): \
The column names, in the correct order.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
columns = self.get_table_columns()
if column_list is None or list(columns) == list(column_list):
return {"success": True, "result": {"observed_value": list(columns)}}
else:
# In the case of differing column lengths between the defined expectation and the observed column set, the
# max is determined to generate the column_index.
number_of_columns = max(len(column_list), len(columns))
column_index = range(number_of_columns)
# Create a list of the mismatched details
compared_lists = list(
zip_longest(column_index, list(column_list), list(columns))
)
mismatched = [
{"Expected Column Position": i, "Expected": k, "Found": v}
for i, k, v in compared_lists
if k != v
]
return {
"success": False,
"result": {
"observed_value": list(columns),
"details": {"mismatched": mismatched},
},
}
@DocInherit
@DataAsset.expectation(["column_set", "exact_match"])
def expect_table_columns_to_match_set(
self,
column_set: Optional[Union[Set[str], List[str]]],
exact_match: Optional[bool] = True,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the columns to match a specified set.
expect_table_columns_to_match_set is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column_set (set of str or list of str): \
The column names you wish to check. If given a list, it will be converted to \
a set before processing. Column names are case sensitive.
exact_match (bool): \
Whether to make sure there are no extra columns in either the dataset or in \
the column_set.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
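        For example (an illustrative sketch, assuming a dataset ``my_df`` whose
        columns are ``["a", "b", "c"]``):
        ::
            >>> my_df.expect_table_columns_to_match_set({"c", "a", "b"})                # success: True; order is ignored
            >>> my_df.expect_table_columns_to_match_set({"a", "b"}, exact_match=False)  # success: True; extra columns are tolerated
            >>> my_df.expect_table_columns_to_match_set({"a", "b"}, exact_match=True)   # success: False; "c" is unexpected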
"""
        # Record whether column_set was provided before normalizing it to a set,
        # so that the vacuous-success check below is not dead code.
        column_set_provided = column_set is not None
        column_set = set(column_set) if column_set_provided else set()
        dataset_columns_list = self.get_table_columns()
        dataset_columns_set = set(dataset_columns_list)
        if (
            (not column_set_provided) and (exact_match is not True)
        ) or dataset_columns_set == column_set:
            return {"success": True, "result": {"observed_value": dataset_columns_list}}
else:
# Convert to lists and sort to lock order for testing and output rendering
# unexpected_list contains items from the dataset columns that are not in column_set
unexpected_list = sorted(list(dataset_columns_set - column_set))
# missing_list contains items from column_set that are not in the dataset columns
missing_list = sorted(list(column_set - dataset_columns_set))
# observed_value contains items that are in the dataset columns
observed_value = sorted(dataset_columns_list)
mismatched = {}
if len(unexpected_list) > 0:
mismatched["unexpected"] = unexpected_list
if len(missing_list) > 0:
mismatched["missing"] = missing_list
result = {
"observed_value": observed_value,
"details": {"mismatched": mismatched},
}
return_success = {
"success": True,
"result": result,
}
return_failed = {
"success": False,
"result": result,
}
if exact_match:
return return_failed
else:
# Failed if there are items in the missing list (but OK to have unexpected_list)
if len(missing_list) > 0:
return return_failed
# Passed if there are no items in the missing list
else:
return return_success
# noinspection PyUnusedLocal
@DocInherit
@DataAsset.expectation(["min_value", "max_value"])
def expect_table_column_count_to_be_between(
self,
min_value=None,
max_value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of columns to be between two values.
expect_table_column_count_to_be_between is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Keyword Args:
min_value (int or None): \
The minimum number of columns, inclusive.
max_value (int or None): \
The maximum number of columns, inclusive.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable columns \
has no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable columns \
has no maximum.
See Also:
expect_table_column_count_to_equal
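        For example (an illustrative sketch, assuming a dataset ``my_df`` with three columns):
        ::
            >>> my_df.expect_table_column_count_to_be_between(min_value=1, max_value=5)  # success: True
            >>> my_df.expect_table_column_count_to_be_between(max_value=2)               # success: False; three columns exceed max_value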
"""
try:
if min_value is not None:
if not float(min_value).is_integer():
raise ValueError("min_value must be integer")
if max_value is not None:
if not float(max_value).is_integer():
raise ValueError("max_value must be integer")
except ValueError:
raise ValueError("min_value and max_value must be integers")
# check that min_value or max_value is set
# if min_value is None and max_value is None:
# raise Exception('Must specify either or both of min_value and max_value')
column_count = self.get_column_count()
if min_value is not None:
above_min = column_count >= min_value
else:
above_min = True
if max_value is not None:
below_max = column_count <= max_value
else:
below_max = True
outcome = above_min and below_max
return {"success": outcome, "result": {"observed_value": column_count}}
# noinspection PyUnusedLocal
@DocInherit
@DataAsset.expectation(["value"])
def expect_table_column_count_to_equal(
self,
value,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of columns to equal a value.
expect_table_column_count_to_equal is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of columns.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_column_count_to_be_between
"""
try:
if not float(value).is_integer():
raise ValueError("value must be an integer")
except ValueError:
raise ValueError("value must be an integer")
column_count = self.get_column_count()
return {
"success": column_count == value,
"result": {"observed_value": column_count},
}
# noinspection PyUnusedLocal
@DocInherit
@DataAsset.expectation(["min_value", "max_value"])
def expect_table_row_count_to_be_between(
self,
min_value=None,
max_value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of rows to be between two values.
expect_table_row_count_to_be_between is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Keyword Args:
min_value (int or None): \
The minimum number of rows, inclusive.
max_value (int or None): \
The maximum number of rows, inclusive.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable rows has \
no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable rows has \
no maximum.
See Also:
expect_table_row_count_to_equal
"""
try:
if min_value is not None:
if not float(min_value).is_integer():
raise ValueError("min_value must be integer")
if max_value is not None:
if not float(max_value).is_integer():
raise ValueError("max_value must be integer")
except ValueError:
raise ValueError("min_value and max_value must be integers")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
# check that min_value or max_value is set
# if min_value is None and max_value is None:
# raise Exception('Must specify either or both of min_value and max_value')
row_count = self.get_row_count()
if min_value is not None:
above_min = row_count >= min_value
else:
above_min = True
if max_value is not None:
below_max = row_count <= max_value
else:
below_max = True
outcome = above_min and below_max
return {"success": outcome, "result": {"observed_value": row_count}}
# noinspection PyUnusedLocal
@DocInherit
@DataAsset.expectation(["value"])
def expect_table_row_count_to_equal(
self,
value,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of rows to equal a value.
expect_table_row_count_to_equal is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of rows.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_row_count_to_be_between
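        For example (an illustrative sketch, assuming a dataset ``my_df`` with 100 rows):
        ::
            >>> my_df.expect_table_row_count_to_equal(100)  # success: True
            >>> my_df.expect_table_row_count_to_equal(99)   # success: False; observed_value is 100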
"""
try:
if not float(value).is_integer():
raise ValueError("value must be an integer")
except ValueError:
raise ValueError("value must be an integer")
row_count = self.get_row_count()
return {"success": row_count == value, "result": {"observed_value": row_count}}
###
#
# Missing values, unique values, and types
#
###
def expect_column_values_to_be_unique(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect each column value to be unique.
This expectation detects duplicates. All duplicated values are counted as exceptions.
For example, `[1, 2, 3, 3, 3]` will return `[3, 3, 3]` in `result.exceptions_list`, with \
`unexpected_percent = 60.0`.
expect_column_values_to_be_unique is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
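        For example (an illustrative sketch, assuming ``my_df.my_col`` holds ``[1, 2, 3, 3, 3]``):
        ::
            >>> my_df.expect_column_values_to_be_unique("my_col")              # success: False; [3, 3, 3] are duplicates
            >>> my_df.expect_column_values_to_be_unique("my_col", mostly=0.3)  # success: True; 2 of 5 values (40%) are unique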
"""
raise NotImplementedError
def expect_column_values_to_not_be_null(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to not be null.
To be counted as an exception, values must be explicitly null or missing, such as a NULL in PostgreSQL or an
np.NaN in pandas. Empty strings don't count as null unless they have been coerced to a null type.
expect_column_values_to_not_be_null is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_null \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_null>`
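        For example (an illustrative sketch, assuming ``my_df.my_col`` holds ``[1, None, 3, 4]``):
        ::
            >>> my_df.expect_column_values_to_not_be_null("my_col")              # success: False; one value is null
            >>> my_df.expect_column_values_to_not_be_null("my_col", mostly=0.7)  # success: True; 3 of 4 values are non-null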
"""
raise NotImplementedError
def expect_column_values_to_be_null(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to be null.
expect_column_values_to_be_null is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_not_be_null \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_be_null>`
"""
raise NotImplementedError
def expect_column_values_to_be_of_type(
self,
column,
type_,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect a column to contain values of a specified data type.
expect_column_values_to_be_of_type is a :func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>` for typed-column backends,
and also for PandasDataset where the column dtype and provided type_ are unambiguous constraints (any dtype
except 'object' or dtype of 'object' with type_ specified as 'object').
For PandasDataset columns with dtype of 'object' expect_column_values_to_be_of_type is a
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>` and will
independently check each row's type.
Args:
column (str): \
The column name.
type\\_ (str): \
A string representing the data type that each column should have as entries. Valid types are defined
by the current backend implementation and are dynamically loaded. For example, valid types for
PandasDataset include any numpy dtype values (such as 'int64') or native python types (such as 'int'),
whereas valid types for a SqlAlchemyDataset include types named by the current driver such as 'INTEGER'
in most SQL dialects and 'TEXT' in dialects such as postgresql. Valid types for SparkDFDataset include
'StringType', 'BooleanType' and other pyspark-defined type names.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See also:
:func:`expect_column_values_to_be_in_type_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_type_list>`
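        For example (an illustrative sketch, assuming a PandasDataset ``my_df`` whose
        ``my_col`` has dtype ``int64``):
        ::
            >>> my_df.expect_column_values_to_be_of_type("my_col", "int64")  # success: True
            >>> my_df.expect_column_values_to_be_of_type("my_col", "str")    # success: False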
"""
raise NotImplementedError
def expect_column_values_to_be_in_type_list(
self,
column,
type_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect a column to contain values from a specified type list.
expect_column_values_to_be_in_type_list is a :func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>` for typed-column backends,
        and also for PandasDataset where the column dtype provides an unambiguous constraint (any dtype except
        'object'). For PandasDataset columns with dtype of 'object', expect_column_values_to_be_in_type_list is a
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>` and will
independently check each row's type.
Args:
column (str): \
The column name.
            type_list (list of str): \
A list of strings representing the data type that each column should have as entries. Valid types are
defined by the current backend implementation and are dynamically loaded. For example, valid types for
PandasDataset include any numpy dtype values (such as 'int64') or native python types (such as 'int'),
whereas valid types for a SqlAlchemyDataset include types named by the current driver such as 'INTEGER'
in most SQL dialects and 'TEXT' in dialects such as postgresql. Valid types for SparkDFDataset include
'StringType', 'BooleanType' and other pyspark-defined type names.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See also:
:func:`expect_column_values_to_be_of_type \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_of_type>`
"""
raise NotImplementedError
###
#
# Sets and ranges
#
####
def expect_column_values_to_be_in_set(
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect each column value to be in a given set.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_values_to_be_in_set(
"my_col",
[2,3]
)
{
"success": false
"result": {
"unexpected_count": 1
"unexpected_percent": 16.66666666666666666,
"unexpected_percent_nonmissing": 16.66666666666666666,
"partial_unexpected_list": [
1
],
},
}
expect_column_values_to_be_in_set is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
parse_strings_as_datetimes (boolean or None) : If True values provided in value_set will be parsed as \
datetimes before making comparisons.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_not_be_in_set \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_be_in_set>`
"""
raise NotImplementedError
def expect_column_values_to_not_be_in_set(
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect column entries to not be in the set.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_values_to_not_be_in_set(
"my_col",
[1,2]
)
{
"success": false
"result": {
"unexpected_count": 3
"unexpected_percent": 50.0,
"unexpected_percent_nonmissing": 50.0,
"partial_unexpected_list": [
1, 2, 2
],
},
}
expect_column_values_to_not_be_in_set is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_in_set \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_set>`
"""
raise NotImplementedError
def expect_column_values_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False,
# tolerance=1e-9,
allow_cross_type_comparisons=None,
parse_strings_as_datetimes=False,
output_strftime_format=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be between a minimum value and a maximum value (inclusive).
expect_column_values_to_be_between is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
min_value (comparable type or None): The minimum value for a column entry.
max_value (comparable type or None): The maximum value for a column entry.
Keyword Args:
strict_min (boolean):
If True, values must be strictly larger than min_value, default=False
strict_max (boolean):
If True, values must be strictly smaller than max_value, default=False
allow_cross_type_comparisons (boolean or None) : If True, allow comparisons between types (e.g. integer and\
string). Otherwise, attempting such comparisons will raise an exception.
parse_strings_as_datetimes (boolean or None) : If True, parse min_value, max_value, and all non-null column\
values to datetimes before making comparisons.
output_strftime_format (str or None): \
                A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound, and there is no minimum value checked.
* If max_value is None, then min_value is treated as a lower bound, and there is no maximum value checked.
See Also:
:func:`expect_column_value_lengths_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_be_between>`
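        For example (an illustrative sketch, assuming ``my_df.my_col`` holds ``[1, 2, 3, 10]``):
        ::
            >>> my_df.expect_column_values_to_be_between("my_col", min_value=1, max_value=10)  # success: True
            >>> my_df.expect_column_values_to_be_between("my_col", min_value=1, max_value=10,
            ...                                          strict_max=True)                      # success: False; 10 is not < 10
            >>> my_df.expect_column_values_to_be_between("my_col", max_value=5, mostly=0.7)    # success: True; only 10 is out of range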
"""
raise NotImplementedError
def expect_column_values_to_be_increasing(
self,
column,
strictly=None,
parse_strings_as_datetimes=False,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to be increasing.
By default, this expectation only works for numeric or datetime data.
When `parse_strings_as_datetimes=True`, it can also parse strings to datetimes.
If `strictly=True`, then this expectation is only satisfied if each consecutive value
is strictly increasing--equal values are treated as failures.
expect_column_values_to_be_increasing is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
strictly (Boolean or None): \
If True, values must be strictly greater than previous values
parse_strings_as_datetimes (boolean or None) : \
                If True, parse all non-null column values as datetimes before making comparisons
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_decreasing \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_decreasing>`
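        For example (an illustrative sketch, assuming ``my_df.my_col`` holds ``[1, 2, 2, 5]``):
        ::
            >>> my_df.expect_column_values_to_be_increasing("my_col")                 # success: True; equal neighbours are allowed
            >>> my_df.expect_column_values_to_be_increasing("my_col", strictly=True)  # success: False; 2 is not strictly greater than 2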
"""
raise NotImplementedError
def expect_column_values_to_be_decreasing(
self,
column,
strictly=None,
parse_strings_as_datetimes=False,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to be decreasing.
By default, this expectation only works for numeric or datetime data.
When `parse_strings_as_datetimes=True`, it can also parse strings to datetimes.
If `strictly=True`, then this expectation is only satisfied if each consecutive value
is strictly decreasing--equal values are treated as failures.
expect_column_values_to_be_decreasing is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
strictly (Boolean or None): \
                If True, values must be strictly less than previous values
            parse_strings_as_datetimes (boolean or None) : \
                If True, parse all non-null column values as datetimes before making comparisons
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_increasing \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_increasing>`
"""
raise NotImplementedError
###
#
# String matching
#
###
def expect_column_value_lengths_to_be_between(
self,
column,
min_value=None,
max_value=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be strings with length between a minimum value and a maximum value (inclusive).
This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError.
expect_column_value_lengths_to_be_between is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
min_value (int or None): \
The minimum value for a column entry length.
max_value (int or None): \
The maximum value for a column entry length.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable rows has \
no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable rows has \
no maximum.
See Also:
:func:`expect_column_value_lengths_to_equal \
<great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_equal>`
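        For example (an illustrative sketch, assuming ``my_df.my_col`` holds
        ``["cat", "mouse", "giraffe"]``):
        ::
            >>> my_df.expect_column_value_lengths_to_be_between("my_col", min_value=3, max_value=7)  # success: True
            >>> my_df.expect_column_value_lengths_to_be_between("my_col", max_value=5)               # success: False; "giraffe" has length 7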
"""
raise NotImplementedError
def expect_column_value_lengths_to_equal(
self,
column,
value,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be strings with length equal to the provided value.
This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError.
        expect_column_value_lengths_to_equal is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
value (int or None): \
The expected value for a column entry length.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_value_lengths_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_be_between>`
"""
raise NotImplementedError
def expect_column_values_to_match_regex(
self,
column,
regex,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be strings that match a given regular expression. Valid matches can be found \
anywhere in the string, for example "[at]+" will identify the following strings as expected: "cat", "hat", \
"aa", "a", and "t", and the following strings as unexpected: "fish", "dog".
expect_column_values_to_match_regex is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex (str): \
The regular expression the column entries should match.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_not_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_match_regex>`
:func:`expect_column_values_to_match_regex_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
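        For example (an illustrative sketch, assuming ``my_df.my_col`` holds ``["cat", "hat", "dog"]``):
        ::
            >>> my_df.expect_column_values_to_match_regex("my_col", "[at]+")              # success: False; "dog" does not match
            >>> my_df.expect_column_values_to_match_regex("my_col", "[at]+", mostly=0.6)  # success: True; 2 of 3 values match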
"""
raise NotImplementedError
def expect_column_values_to_not_match_regex(
self,
column,
regex,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be strings that do NOT match a given regular expression. The regex must not match \
any portion of the provided string. For example, "[at]+" would identify the following strings as expected: \
"fish", "dog", and the following as unexpected: "cat", "hat".
expect_column_values_to_not_match_regex is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex (str): \
The regular expression the column entries should NOT match.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex>`
:func:`expect_column_values_to_match_regex_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
"""
raise NotImplementedError
def expect_column_values_to_match_regex_list(
self,
column,
regex_list,
match_on="any",
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column entries to be strings that can be matched to either any of or all of a list of regular
expressions. Matches can be anywhere in the string.
expect_column_values_to_match_regex_list is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex_list (list): \
The list of regular expressions which the column entries should match
Keyword Args:
            match_on (string): \
"any" or "all".
Use "any" if the value should match at least one regular expression in the list.
Use "all" if it should match each regular expression in the list.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex>`
:func:`expect_column_values_to_not_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_match_regex>`
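        For example (an illustrative sketch, assuming ``my_df.my_col`` holds ``["aaa", "abb", "bbb"]``):
        ::
            >>> my_df.expect_column_values_to_match_regex_list("my_col", ["a+", "b+"], match_on="any")  # success: True; every value matches at least one regex
            >>> my_df.expect_column_values_to_match_regex_list("my_col", ["a+", "b+"], match_on="all")  # success: False; "aaa" and "bbb" each match only one regex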
"""
raise NotImplementedError
def expect_column_values_to_not_match_regex_list(
self,
column,
regex_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column entries to be strings that do not match any of a list of regular expressions. Matches can
be anywhere in the string.
expect_column_values_to_not_match_regex_list is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex_list (list): \
The list of regular expressions which the column entries should not match
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
"""
raise NotImplementedError
###
#
# Datetime and JSON parsing
#
###
def expect_column_values_to_match_strftime_format(
self,
column,
strftime_format,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be strings representing a date or time with a given format.
expect_column_values_to_match_strftime_format is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
strftime_format (str): \
A strftime format string to use for matching
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
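        For example (an illustrative sketch, assuming ``my_df.my_col`` holds
        ``["2020-01-01", "01/02/2020"]``):
        ::
            >>> my_df.expect_column_values_to_match_strftime_format("my_col", "%Y-%m-%d")              # success: False; "01/02/2020" does not match
            >>> my_df.expect_column_values_to_match_strftime_format("my_col", "%Y-%m-%d", mostly=0.4)  # success: True; 1 of 2 values matches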
"""
raise NotImplementedError
def expect_column_values_to_be_dateutil_parseable(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be parsable using dateutil.
expect_column_values_to_be_dateutil_parseable is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_column_values_to_be_json_parseable(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be data written in JavaScript Object Notation.
expect_column_values_to_be_json_parseable is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_json_schema \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_json_schema>`
"""
raise NotImplementedError
def expect_column_values_to_match_json_schema(
self,
column,
json_schema,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be JSON objects matching a given JSON schema.
expect_column_values_to_match_json_schema is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
        Args:
            column (str): \
                The column name.
            json_schema (dict): \
                The JSON schema that each column entry is expected to validate against.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_json_parseable \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_json_parseable>`
The `JSON-schema docs <http://json-schema.org/>`_.
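        For example (an illustrative sketch; the schema below is an ordinary JSON Schema
        dict requiring an object with an integer ``id`` field):
        ::
            >>> schema = {
            ...     "type": "object",
            ...     "properties": {"id": {"type": "integer"}},
            ...     "required": ["id"],
            ... }
            >>> my_df.expect_column_values_to_match_json_schema("my_col", json_schema=schema)
            # success: True only if every non-null entry in my_col validates against the schema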
"""
raise NotImplementedError
###
#
# Aggregate functions
#
####
def expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(
self,
column,
distribution,
p_value=0.05,
params=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect the column values to be distributed similarly to a scipy distribution. \
This expectation compares the provided column to the specified continuous distribution with a parametric \
Kolmogorov-Smirnov test. The K-S test compares the provided column to the cumulative density function (CDF) of \
the specified scipy distribution. If you don't know the desired distribution shape parameters, use the \
`ge.dataset.util.infer_distribution_parameters()` utility function to estimate them.
It returns 'success'=True if the p-value from the K-S test is greater than or equal to the provided p-value.
``expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than`` is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
distribution (str): \
The scipy distribution name. See: `<https://docs.scipy.org/doc/scipy/reference/stats.html>`_ Currently
supported distributions are listed in the Notes section below.
p_value (float): \
The threshold p-value for a passing test. Default is 0.05.
params (dict or list) : \
A dictionary or positional list of shape parameters that describe the distribution you want to test the\
data against. Include key values specific to the distribution from the appropriate scipy \
distribution CDF function. 'loc' and 'scale' are used as translational parameters.\
See `<https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions>`_
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"details":
"expected_params" (dict): The specified or inferred parameters of the distribution to test \
against
"ks_results" (dict): The raw result of stats.kstest()
}
* The Kolmogorov-Smirnov test's null hypothesis is that the column is similar to the provided distribution.
* Supported scipy distributions:
* norm
* beta
* gamma
* uniform
* chi2
* expon
"""
raise NotImplementedError
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_distinct_values_to_be_in_set(
self,
column,
value_set,
parse_strings_as_datetimes=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect the set of distinct column values to be contained by a given set.
The success value for this expectation will match that of expect_column_values_to_be_in_set. However,
expect_column_distinct_values_to_be_in_set is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_distinct_values_to_be_in_set(
"my_col",
[2, 3, 4]
)
{
"success": false
"result": {
"observed_value": [1,2,3],
"details": {
"value_counts": [
{
"value": 1,
"count": 1
},
{
"value": 2,
"count": 1
},
{
"value": 3,
"count": 1
}
]
}
}
}
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
            parse_strings_as_datetimes (boolean or None): If True, values provided in value_set will be parsed \
                as datetimes before making comparisons.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_distinct_values_to_contain_set \
<great_expectations.dataset.dataset.Dataset.expect_column_distinct_values_to_contain_set>`
"""
observed_value_counts = self.get_column_value_counts(column)
if value_set is None:
# Vacuously true
success = True
parsed_observed_value_set = set(observed_value_counts.index)
else:
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
parsed_observed_value_set = set(
self._parse_value_set(observed_value_counts.index)
)
else:
parsed_value_set = value_set
parsed_observed_value_set = set(observed_value_counts.index)
expected_value_set = set(parsed_value_set)
success = parsed_observed_value_set.issubset(expected_value_set)
return {
"success": success,
"result": {
"observed_value": sorted(list(parsed_observed_value_set)),
"details": {"value_counts": observed_value_counts},
},
}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_distinct_values_to_equal_set(
self,
column,
value_set,
parse_strings_as_datetimes=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect the set of distinct column values to equal a given set.
In contrast to expect_column_distinct_values_to_contain_set() this ensures not only that a certain set of \
values are present in the column but that these *and only these* values are present.
expect_column_distinct_values_to_equal_set is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_distinct_values_to_equal_set(
"my_col",
[2,3]
)
{
"success": false
"result": {
"observed_value": [1,2,3]
},
}
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
            parse_strings_as_datetimes (boolean or None): If True, values provided in value_set will be parsed as \
                datetimes before making comparisons.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_distinct_values_to_contain_set \
<great_expectations.dataset.dataset.Dataset.expect_column_distinct_values_to_contain_set>`
"""
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
observed_value_counts = self.get_column_value_counts(column)
expected_value_set = set(parsed_value_set)
observed_value_set = set(observed_value_counts.index)
return {
"success": observed_value_set == expected_value_set,
"result": {
"observed_value": sorted(list(observed_value_set)),
"details": {"value_counts": observed_value_counts},
},
}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_distinct_values_to_contain_set(
self,
column,
value_set,
parse_strings_as_datetimes=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect the set of distinct column values to contain a given set.
        In contrast to expect_column_values_to_be_in_set(), this does not require that all column values be members of
        the given set; instead, it requires that every value from the set *must* be present in the column.
expect_column_distinct_values_to_contain_set is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_distinct_values_to_contain_set(
"my_col",
[2,3]
)
{
"success": true
"result": {
"observed_value": [1,2,3]
},
}
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
            parse_strings_as_datetimes (boolean or None): If True, values provided in value_set will be parsed as \
                datetimes before making comparisons.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_distinct_values_to_equal_set \
<great_expectations.dataset.dataset.Dataset.expect_column_distinct_values_to_equal_set>`
"""
observed_value_counts = self.get_column_value_counts(column)
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
observed_value_counts.index = pd.to_datetime(observed_value_counts.index)
else:
parsed_value_set = value_set
expected_value_set = set(parsed_value_set)
observed_value_set = set(observed_value_counts.index)
return {
"success": observed_value_set.issuperset(expected_value_set),
"result": {
"observed_value": sorted(list(observed_value_set)),
"details": {"value_counts": observed_value_counts},
},
}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_mean_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False, # tolerance=1e-9,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column mean to be between a minimum value and a maximum value (inclusive).
expect_column_mean_to_be_between is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
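        For example (an illustrative sketch; ``my_df`` denotes a hypothetical pandas-backed dataset):
        ::
            # my_df.my_col = [1, 2, 2, 3, 3, 3]  (mean = 14 / 6 ≈ 2.33)
            >>> my_df.expect_column_mean_to_be_between("my_col", min_value=2, max_value=3)
            # Expected to return "success": True with "observed_value" ≈ 2.33.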
Args:
column (str): \
The column name.
min_value (float or None): \
The minimum value for the column mean.
max_value (float or None): \
The maximum value for the column mean.
strict_min (boolean):
If True, the column mean must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the column mean must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true mean for the column
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound.
* If max_value is None, then min_value is treated as a lower bound.
See Also:
:func:`expect_column_median_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_median_to_be_between>`
:func:`expect_column_stdev_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_stdev_to_be_between>`
"""
if min_value is not None and not isinstance(min_value, Number):
raise ValueError("min_value must be a number")
if max_value is not None and not isinstance(max_value, Number):
raise ValueError("max_value must be a number")
column_mean = self.get_column_mean(column)
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
# Handle possible missing values
if column_mean is None:
return {"success": False, "result": {"observed_value": column_mean}}
if min_value is not None:
if strict_min:
above_min = column_mean > min_value
else:
above_min = column_mean >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = column_mean < max_value
else:
below_max = column_mean <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": column_mean}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_median_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False, # tolerance=1e-9,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column median to be between a minimum value and a maximum value.
expect_column_median_to_be_between is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
min_value (int or None): \
The minimum value for the column median.
max_value (int or None): \
The maximum value for the column median.
strict_min (boolean):
If True, the column median must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the column median must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true median for the column
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
See Also:
:func:`expect_column_mean_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_mean_to_be_between>`
:func:`expect_column_stdev_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_stdev_to_be_between>`
"""
column_median = self.get_column_median(column)
if column_median is None:
return {"success": False, "result": {"observed_value": None}}
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
if min_value is not None:
if strict_min:
above_min = column_median > min_value
else:
above_min = column_median >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = column_median < max_value
else:
below_max = column_median <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": column_median}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_quantile_values_to_be_between(
self,
column,
quantile_ranges,
allow_relative_error=False,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect specific provided column quantiles to be between provided minimum and maximum values.
``quantile_ranges`` must be a dictionary with two keys:
* ``quantiles``: (list of float) increasing ordered list of desired quantile values
* ``value_ranges``: (list of lists): Each element in this list consists of a list with two values, a lower \
and upper bound (inclusive) for the corresponding quantile.
For each provided range:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound only
* If max_value is None, then min_value is treated as a lower bound only
        The length of the quantiles list and the value_ranges list must be equal.
For example:
::
# my_df.my_col = [1,2,2,3,3,3,4]
>>> my_df.expect_column_quantile_values_to_be_between(
"my_col",
{
"quantiles": [0., 0.333, 0.6667, 1.],
"value_ranges": [[0,1], [2,3], [3,4], [4,5]]
}
)
{
"success": True,
"result": {
"observed_value": {
"quantiles: [0., 0.333, 0.6667, 1.],
"values": [1, 2, 3, 4],
}
"element_count": 7,
"missing_count": 0,
"missing_percent": 0.0,
"details": {
"success_details": [true, true, true, true]
}
}
}
}
`expect_column_quantile_values_to_be_between` can be computationally intensive for large datasets.
expect_column_quantile_values_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
quantile_ranges (dictionary): \
Quantiles and associated value ranges for the column. See above for details.
allow_relative_error (boolean): \
                Whether to allow relative error in quantile computations on backends that support or require it.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
details.success_details
See Also:
:func:`expect_column_min_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_min_to_be_between>`
:func:`expect_column_max_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_max_to_be_between>`
:func:`expect_column_median_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_median_to_be_between>`
"""
quantiles = quantile_ranges["quantiles"]
quantile_value_ranges = quantile_ranges["value_ranges"]
if len(quantiles) != len(quantile_value_ranges):
raise ValueError(
"quntile_values and quantiles must have the same number of elements"
)
quantile_vals = self.get_column_quantiles(
column, tuple(quantiles), allow_relative_error=allow_relative_error
)
# We explicitly allow "None" to be interpreted as +/- infinity
comparison_quantile_ranges = [
[
-np.inf if lower_bound is None else lower_bound,
np.inf if upper_bound is None else upper_bound,
]
for (lower_bound, upper_bound) in quantile_value_ranges
]
success_details = [
range_[0] <= quantile_vals[idx] <= range_[1]
for idx, range_ in enumerate(comparison_quantile_ranges)
]
return {
"success": np.all(success_details),
"result": {
"observed_value": {"quantiles": quantiles, "values": quantile_vals},
"details": {"success_details": success_details},
},
}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_stdev_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False, # tolerance=1e-9,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column standard deviation to be between a minimum value and a maximum value.
Uses sample standard deviation (normalized by N-1).
expect_column_stdev_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
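        For example (an illustrative sketch; ``my_df`` denotes a hypothetical pandas-backed dataset):
        ::
            # my_df.my_col = [1, 2, 2, 3, 3, 3]  (sample standard deviation with N-1 ≈ 0.816)
            >>> my_df.expect_column_stdev_to_be_between("my_col", min_value=0.5, max_value=1)
            # Expected to return "success": True with "observed_value" ≈ 0.816.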
Args:
column (str): \
The column name.
min_value (float or None): \
The minimum value for the column standard deviation.
max_value (float or None): \
The maximum value for the column standard deviation.
strict_min (boolean):
If True, the column standard deviation must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the column standard deviation must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true standard deviation for the column
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
See Also:
:func:`expect_column_mean_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_mean_to_be_between>`
:func:`expect_column_median_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_median_to_be_between>`
"""
column_stdev = self.get_column_stdev(column)
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
if min_value is not None:
if strict_min:
above_min = column_stdev > min_value
else:
above_min = column_stdev >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = column_stdev < max_value
else:
below_max = column_stdev <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": column_stdev}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_unique_value_count_to_be_between(
self,
column,
min_value=None,
max_value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of unique values to be between a minimum value and a maximum value.
expect_column_unique_value_count_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
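        For example (an illustrative sketch; ``my_df`` denotes a hypothetical pandas-backed dataset):
        ::
            # my_df.my_col = [1, 2, 2, 3, 3, 3]  (3 unique values)
            >>> my_df.expect_column_unique_value_count_to_be_between("my_col", min_value=2, max_value=5)
            # Expected to return "success": True with "observed_value": 3.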
Args:
column (str): \
The column name.
min_value (int or None): \
The minimum number of unique values allowed.
max_value (int or None): \
The maximum number of unique values allowed.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (int) The number of unique values in the column
}
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
See Also:
:func:`expect_column_proportion_of_unique_values_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_proportion_of_unique_values_to_be_between>`
"""
unique_value_count = self.get_column_unique_count(column)
if unique_value_count is None:
return {"success": False, "result": {"observed_value": unique_value_count}}
if min_value is not None:
above_min = unique_value_count >= min_value
else:
above_min = True
if max_value is not None:
below_max = unique_value_count <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": unique_value_count}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_proportion_of_unique_values_to_be_between(
self,
column,
min_value=0,
max_value=1,
strict_min=False,
strict_max=False, # tolerance=1e-9,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the proportion of unique values to be between a minimum value and a maximum value.
For example, in a column containing [1, 2, 2, 3, 3, 3, 4, 4, 4, 4], there are 4 unique values and 10 total \
values for a proportion of 0.4.
expect_column_proportion_of_unique_values_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
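        For example (an illustrative sketch using the column described above; ``my_df`` denotes a \
        hypothetical pandas-backed dataset):
        ::
            # my_df.my_col = [1, 2, 2, 3, 3, 3, 4, 4, 4, 4]  (4 unique / 10 total = 0.4)
            >>> my_df.expect_column_proportion_of_unique_values_to_be_between(
                    "my_col", min_value=0.3, max_value=0.5
                )
            # Expected to return "success": True with "observed_value": 0.4.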
Args:
column (str): \
The column name.
min_value (float or None): \
The minimum proportion of unique values. (Proportions are on the range 0 to 1)
max_value (float or None): \
The maximum proportion of unique values. (Proportions are on the range 0 to 1)
            strict_min (boolean):
                If True, the proportion of unique values must be strictly larger than min_value, default=False
            strict_max (boolean):
                If True, the proportion of unique values must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The proportion of unique values in the column
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
See Also:
:func:`expect_column_unique_value_count_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_unique_value_count_to_be_between>`
"""
# Tolerance docstring for later use:
# tolerance (float):
# tolerance for strict_min, strict_max, default=1e-9
unique_value_count = self.get_column_unique_count(column)
total_value_count = self.get_column_nonnull_count(column)
if total_value_count > 0:
proportion_unique = float(unique_value_count) / total_value_count
else:
proportion_unique = None
# if strict_min:
# if min_value:
# min_value += tolerance
#
# if strict_max:
# if max_value:
# max_value -= tolerance
if min_value is not None:
if strict_min:
above_min = proportion_unique > min_value
else:
above_min = proportion_unique >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = proportion_unique < max_value
else:
below_max = proportion_unique <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": proportion_unique}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_most_common_value_to_be_in_set(
self,
column,
value_set,
ties_okay=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the most common value to be within the designated value set
expect_column_most_common_value_to_be_in_set is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
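        For example (an illustrative sketch; ``my_df`` denotes a hypothetical pandas-backed dataset):
        ::
            # my_df.my_col = [1, 2, 2, 3, 3, 3]  (most common value: 3)
            >>> my_df.expect_column_most_common_value_to_be_in_set("my_col", [3, 4])
            # Expected to return "success": True with "observed_value": [3].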
Args:
column (str): \
The column name
value_set (set-like): \
A list of potential values to match
Keyword Args:
ties_okay (boolean or None): \
                If True, then the expectation will still succeed if values outside the designated set are as common as \
                    (but not more common than) the designated values
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The most common values in the column
}
`observed_value` contains a list of the most common values.
Often, this will just be a single element. But if there's a tie for most common among multiple values,
`observed_value` will contain a single copy of each most common value.
"""
mode_list = self.get_column_modes(column)
intersection_count = len(set(value_set).intersection(mode_list))
if ties_okay:
success = intersection_count > 0
else:
success = len(mode_list) == 1 and intersection_count == 1
return {"success": success, "result": {"observed_value": mode_list}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_sum_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False, # tolerance=1e-9,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column to sum to be between an min and max value
expect_column_sum_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
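        For example (an illustrative sketch; ``my_df`` denotes a hypothetical pandas-backed dataset):
        ::
            # my_df.my_col = [1, 2, 2, 3, 3, 3]  (sum = 14)
            >>> my_df.expect_column_sum_to_be_between("my_col", min_value=10, max_value=20)
            # Expected to return "success": True with "observed_value": 14.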
Args:
column (str): \
The column name
min_value (comparable type or None): \
The minimal sum allowed.
max_value (comparable type or None): \
The maximal sum allowed.
            strict_min (boolean):
                If True, the column sum must be strictly larger than min_value, default=False
            strict_max (boolean):
                If True, the column sum must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The actual column sum
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
"""
column_sum = self.get_column_sum(column)
# Handle possible missing values
if column_sum is None:
return {"success": False, "result": {"observed_value": column_sum}}
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
if min_value is not None:
if strict_min:
above_min = column_sum > min_value
else:
above_min = column_sum >= min_value
else:
above_min = True
if max_value is not None:
if strict_max:
below_max = column_sum < max_value
else:
below_max = column_sum <= max_value
else:
below_max = True
success = above_min and below_max
return {"success": success, "result": {"observed_value": column_sum}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_min_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False, # tolerance=1e-9,
parse_strings_as_datetimes=False,
output_strftime_format=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column minimum to be between an min and max value
expect_column_min_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
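        For example (an illustrative sketch; ``my_df`` denotes a hypothetical pandas-backed dataset whose \
        column holds date strings):
        ::
            # my_df.my_col = ["2020-01-05", "2020-02-01", "2020-03-15"]
            >>> my_df.expect_column_min_to_be_between(
                    "my_col",
                    min_value="2020-01-01",
                    max_value="2020-01-31",
                    parse_strings_as_datetimes=True,
                    output_strftime_format="%Y-%m-%d"
                )
            # Expected to return "success": True with "observed_value": "2020-01-05".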
Args:
column (str): \
The column name
min_value (comparable type or None): \
The minimal column minimum allowed.
max_value (comparable type or None): \
The maximal column minimum allowed.
            strict_min (boolean):
                If True, the column minimum must be strictly larger than min_value, default=False
            strict_max (boolean):
                If True, the column minimum must be strictly smaller than max_value, default=False
        Keyword Args:
            parse_strings_as_datetimes (Boolean or None): \
                If True, parse min_value, max_value, and all non-null column values to datetimes before making \
                comparisons.
            output_strftime_format (str or None): \
                A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The actual column min
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
"""
# Tolerance docstring for later implementation:
# tolerance(float):
        #     tolerance for strict_min, strict_max, default=1e-9. If parse_strings_as_datetimes is True, this tolerance is measured in number of days
if parse_strings_as_datetimes:
# tolerance = timedelta(days=tolerance)
if min_value:
min_value = parse(min_value)
if max_value:
max_value = parse(max_value)
column_min = self.get_column_min(column, parse_strings_as_datetimes)
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
if column_min is None:
success = False
else:
if min_value is not None:
if isinstance(column_min, datetime):
try:
min_value = parse(min_value)
except (ValueError, TypeError) as e:
pass
if strict_min:
above_min = column_min > min_value
else:
above_min = column_min >= min_value
else:
above_min = True
if max_value is not None:
if isinstance(column_min, datetime):
try:
max_value = parse(max_value)
except (ValueError, TypeError) as e:
pass
if strict_max:
below_max = column_min < max_value
else:
below_max = column_min <= max_value
else:
below_max = True
success = above_min and below_max
if parse_strings_as_datetimes:
if output_strftime_format:
column_min = datetime.strftime(column_min, output_strftime_format)
else:
column_min = str(column_min)
return {"success": success, "result": {"observed_value": column_min}}
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_max_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False,
# tolerance=1e-9,
parse_strings_as_datetimes=False,
output_strftime_format=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column max to be between an min and max value
expect_column_max_to_be_between is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name
            min_value (comparable type or None): \
                The minimal column maximum allowed.
            max_value (comparable type or None): \
                The maximal column maximum allowed.
        Keyword Args:
            parse_strings_as_datetimes (Boolean or None): \
                If True, parse min_value, max_value, and all non-null column values to datetimes before making \
                comparisons.
            output_strftime_format (str or None): \
                A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.
            strict_min (boolean):
                If True, the column maximum must be strictly larger than min_value, default=False
            strict_max (boolean):
                If True, the column maximum must be strictly smaller than max_value, default=False
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The actual column max
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
"""
if parse_strings_as_datetimes:
# tolerance = timedelta(days=tolerance)
if min_value:
min_value = parse(min_value)
if max_value:
max_value = parse(max_value)
# else:
# if strict_min and min_value:
# min_value += tolerance
#
# if strict_max and max_value:
# max_value -= tolerance
column_max = self.get_column_max(column, parse_strings_as_datetimes)
if column_max is None:
success = False
else:
if min_value is not None:
if isinstance(column_max, datetime):
try:
min_value = parse(min_value)
except (ValueError, TypeError) as e:
pass
if strict_min:
above_min = column_max > min_value
else:
above_min = column_max >= min_value
else:
above_min = True
if max_value is not None:
if isinstance(column_max, datetime):
try:
max_value = parse(max_value)
except (ValueError, TypeError) as e:
pass
if strict_max:
below_max = column_max < max_value
else:
below_max = column_max <= max_value
else:
below_max = True
success = above_min and below_max
if parse_strings_as_datetimes:
if output_strftime_format:
column_max = datetime.strftime(column_max, output_strftime_format)
else:
column_max = str(column_max)
return {"success": success, "result": {"observed_value": column_max}}
###
#
# Distributional expectations
#
###
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_chisquare_test_p_value_to_be_greater_than(
self,
column,
partition_object=None,
p=0.05,
tail_weight_holdout=0,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to be distributed similarly to the provided categorical partition. \
This expectation compares categorical distributions using a Chi-squared test. \
It returns `success=True` if values in the column match the distribution of the provided partition.
expect_column_chisquare_test_p_value_to_be_greater_than is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
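        For example (an illustrative sketch; ``my_df`` denotes a hypothetical pandas-backed dataset and the \
        partition weights below are assumptions):
        ::
            # my_df.my_col = ["a", "a", "a", "b", "b", "c"]
            >>> my_df.expect_column_chisquare_test_p_value_to_be_greater_than(
                    "my_col",
                    partition_object={
                        "values": ["a", "b", "c"],
                        "weights": [0.5, 0.33, 0.17],
                    },
                    p=0.05
                )
            # The observed frequencies (3, 2, 1 out of 6) closely match the expected partition,
            # so the p-value should be large and "success" would be expected to be True.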
Args:
column (str): \
The column name.
partition_object (dict): \
The expected partition object (see :ref:`partition_object`).
p (float): \
The p-value threshold for rejecting the null hypothesis of the Chi-Squared test.\
For values below the specified threshold, the expectation will return `success=False`,\
rejecting the null hypothesis that the distributions are the same.\
Defaults to 0.05.
Keyword Args:
tail_weight_holdout (float between 0 and 1 or None): \
The amount of weight to split uniformly between values observed in the data but not present in the \
provided partition. tail_weight_holdout provides a mechanism to make the test less strict by \
assigning positive weights to unknown values observed in the data that are not present in the \
partition.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true p-value of the Chi-squared test
"details": {
"observed_partition" (dict):
The partition observed in the data.
"expected_partition" (dict):
The partition expected from the data, after including tail_weight_holdout
}
}
"""
if not is_valid_categorical_partition_object(partition_object):
raise ValueError("Invalid partition object.")
element_count = self.get_column_nonnull_count(column)
observed_frequencies = self.get_column_value_counts(column)
# Convert to Series object to allow joining on index values
expected_column = (
pd.Series(
partition_object["weights"],
index=partition_object["values"],
name="expected",
)
* element_count
)
# Join along the indices to allow proper comparison of both types of possible missing values
# Sort parameter not available before pandas 0.23.0
# test_df = pd.concat([expected_column, observed_frequencies], axis=1, sort=True)
test_df = pd.concat([expected_column, observed_frequencies], axis=1)
na_counts = test_df.isnull().sum()
# Handle NaN: if we expected something that's not there, it's just not there.
test_df["count"] = test_df["count"].fillna(0)
# Handle NaN: if something's there that was not expected, substitute the relevant value for tail_weight_holdout
if na_counts["expected"] > 0:
# Scale existing expected values
test_df["expected"] *= 1 - tail_weight_holdout
# Fill NAs with holdout.
test_df["expected"] = test_df["expected"].fillna(
element_count * (tail_weight_holdout / na_counts["expected"])
)
test_result = stats.chisquare(test_df["count"], test_df["expected"])[1]
        # Normalize the outputs so they can be used as partitions into other expectations
# GH653
expected_weights = (test_df["expected"] / test_df["expected"].sum()).tolist()
observed_weights = (test_df["count"] / test_df["count"].sum()).tolist()
return {
"success": test_result > p,
"result": {
"observed_value": test_result,
"details": {
"observed_partition": {
"values": test_df.index.tolist(),
"weights": observed_weights,
},
"expected_partition": {
"values": test_df.index.tolist(),
"weights": expected_weights,
},
},
},
}
def expect_column_bootstrapped_ks_test_p_value_to_be_greater_than(
self,
column,
partition_object=None,
p=0.05,
bootstrap_samples=None,
bootstrap_sample_size=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to be distributed similarly to the provided continuous partition. This expectation \
compares continuous distributions using a bootstrapped Kolmogorov-Smirnov test. It returns `success=True` if \
values in the column match the distribution of the provided partition.
        The expected cumulative distribution function (CDF) is constructed as a linear interpolation between the bins, \
using the provided weights. Consequently the test expects a piecewise uniform distribution using the bins from \
the provided partition object.
``expect_column_bootstrapped_ks_test_p_value_to_be_greater_than`` is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
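        For example (an illustrative sketch; ``my_df`` denotes a hypothetical dataset whose numeric column is \
        roughly uniform on [0, 1], and the partition below is an assumption):
        ::
            >>> my_df.expect_column_bootstrapped_ks_test_p_value_to_be_greater_than(
                    "my_col",
                    partition_object={
                        "bins": [0.0, 0.25, 0.5, 0.75, 1.0],
                        "weights": [0.25, 0.25, 0.25, 0.25],
                    },
                    p=0.05,
                    bootstrap_samples=1000
                )
            # If my_col is approximately uniform on [0, 1], the bootstrapped K-S p-value should
            # exceed 0.05 and the expectation would be expected to return "success": True.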
Args:
column (str): \
The column name.
partition_object (dict): \
The expected partition object (see :ref:`partition_object`).
p (float): \
The p-value threshold for the Kolmogorov-Smirnov test.
For values below the specified threshold the expectation will return `success=False`, rejecting the \
null hypothesis that the distributions are the same. \
Defaults to 0.05.
Keyword Args:
bootstrap_samples (int): \
                The number of bootstrap rounds. Defaults to 1000.
bootstrap_sample_size (int): \
The number of samples to take from the column for each bootstrap. A larger sample will increase the \
specificity of the test. Defaults to 2 * len(partition_object['weights'])
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true p-value of the KS test
"details": {
"bootstrap_samples": The number of bootstrap rounds used
"bootstrap_sample_size": The number of samples taken from
the column in each bootstrap round
"observed_cdf": The cumulative density function observed
in the data, a dict containing 'x' values and cdf_values
(suitable for plotting)
"expected_cdf" (dict):
                            The cumulative distribution function expected based on the
partition object, a dict containing 'x' values and
cdf_values (suitable for plotting)
"observed_partition" (dict):
The partition observed on the data, using the provided
bins but also expanding from min(column) to max(column)
"expected_partition" (dict):
The partition expected from the data. For KS test,
this will always be the partition_object parameter
}
}
"""
raise NotImplementedError
# noinspection PyUnusedLocal
@DocInherit
@MetaDataset.column_aggregate_expectation
def expect_column_kl_divergence_to_be_less_than(
self,
column,
partition_object=None,
threshold=None,
tail_weight_holdout=0,
internal_weight_holdout=0,
bucketize_data=True,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the Kulback-Leibler (KL) divergence (relative entropy) of the specified column with respect to the \
partition object to be lower than the provided threshold.
KL divergence compares two distributions. The higher the divergence value (relative entropy), the larger the \
difference between the two distributions. A relative entropy of zero indicates that the data are \
distributed identically, `when binned according to the provided partition`.
In many practical contexts, choosing a value between 0.5 and 1 will provide a useful test.
This expectation works on both categorical and continuous partitions. See notes below for details.
``expect_column_kl_divergence_to_be_less_than`` is a \
:func:`column_aggregate_expectation <great_expectations.dataset.MetaDataset.column_aggregate_expectation>`.
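        For example (an illustrative sketch; ``my_df`` denotes a hypothetical pandas-backed dataset and the \
        categorical partition below is an assumption):
        ::
            # my_df.my_col = ["a", "a", "a", "b", "b", "c"]
            >>> my_df.expect_column_kl_divergence_to_be_less_than(
                    "my_col",
                    partition_object={
                        "values": ["a", "b", "c"],
                        "weights": [0.5, 0.33, 0.17],
                    },
                    threshold=0.1
                )
            # The observed weights (0.5, 0.33, 0.17) are close to the expected partition, so the
            # relative entropy should be near zero and "success" would be expected to be True.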
Args:
column (str): \
The column name.
partition_object (dict): \
The expected partition object (see :ref:`partition_object`).
threshold (float): \
                The maximum KL divergence for which to return `success=True`. If the KL divergence is larger than the\
provided threshold, the test will return `success=False`.
Keyword Args:
internal_weight_holdout (float between 0 and 1 or None): \
The amount of weight to split uniformly among zero-weighted partition bins. internal_weight_holdout \
                provides a mechanism to make the test less strict by assigning positive weights to values observed in \
the data for which the partition explicitly expected zero weight. With no internal_weight_holdout, \
any value observed in such a region will cause KL divergence to rise to +Infinity.\
Defaults to 0.
tail_weight_holdout (float between 0 and 1 or None): \
The amount of weight to add to the tails of the histogram. Tail weight holdout is split evenly between\
(-Infinity, min(partition_object['bins'])) and (max(partition_object['bins']), +Infinity). \
tail_weight_holdout provides a mechanism to make the test less strict by assigning positive weights to \
values observed in the data that are not present in the partition. With no tail_weight_holdout, \
any value observed outside the provided partition_object will cause KL divergence to rise to +Infinity.\
Defaults to 0.
bucketize_data (boolean): If True, then continuous data will be bucketized before evaluation. Setting
this parameter to false allows evaluation of KL divergence with a None partition object for profiling
against discrete data.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (float) The true KL divergence (relative entropy) or None if the value is \
calculated as infinity, -infinity, or NaN
"details": {
"observed_partition": (dict) The partition observed in the data
"expected_partition": (dict) The partition against which the data were compared,
after applying specified weight holdouts.
}
}
If the partition_object is categorical, this expectation will expect the values in column to also be \
categorical.
* If the column includes values that are not present in the partition, the tail_weight_holdout will be \
equally split among those values, providing a mechanism to weaken the strictness of the expectation \
(otherwise, relative entropy would immediately go to infinity).
* If the partition includes values that are not present in the column, the test will simply include \
zero weight for that value.
If the partition_object is continuous, this expectation will discretize the values in the column according \
to the bins specified in the partition_object, and apply the test to the resulting distribution.
* The internal_weight_holdout and tail_weight_holdout parameters provide a mechanism to weaken the \
expectation, since an expected weight of zero would drive relative entropy to be infinite if any data \
are observed in that interval.
* If internal_weight_holdout is specified, that value will be distributed equally among any intervals \
with weight zero in the partition_object.
* If tail_weight_holdout is specified, that value will be appended to the tails of the bins \
                ((-Infinity, min(bins)) and (max(bins), Infinity)).
            If relative entropy/KL divergence goes to infinity for any of the reasons mentioned above, the observed \
            value will be set to None. This is because inf, -inf, and NaN are not JSON serializable and cause some \
            JSON parsers to crash when encountered. The Python None token will be serialized to null in JSON.
        See Also:
            :func:`expect_column_chisquare_test_p_value_to_be_greater_than \
            <great_expectations.dataset.dataset.Dataset.expect_column_chisquare_test_p_value_to_be_greater_than>`
            :func:`expect_column_bootstrapped_ks_test_p_value_to_be_greater_than \
            <great_expectations.dataset.dataset.Dataset.expect_column_bootstrapped_ks_test_p_value_to_be_greater_than>`
"""
if partition_object is None:
if bucketize_data:
partition_object = build_continuous_partition_object(
dataset=self, column=column
)
else:
partition_object = build_categorical_partition_object(
dataset=self, column=column
)
if not is_valid_partition_object(partition_object):
raise ValueError("Invalid partition object.")
if threshold is not None and (
(not isinstance(threshold, (int, float))) or (threshold < 0)
):
raise ValueError(
"Threshold must be specified, greater than or equal to zero."
)
if (
(not isinstance(tail_weight_holdout, (int, float)))
or (tail_weight_holdout < 0)
or (tail_weight_holdout > 1)
):
raise ValueError("tail_weight_holdout must be between zero and one.")
if (
(not isinstance(internal_weight_holdout, (int, float)))
or (internal_weight_holdout < 0)
or (internal_weight_holdout > 1)
):
raise ValueError("internal_weight_holdout must be between zero and one.")
if tail_weight_holdout != 0 and "tail_weights" in partition_object:
raise ValueError(
"tail_weight_holdout must be 0 when using tail_weights in partition object"
)
# TODO: add checks for duplicate values in is_valid_categorical_partition_object
if is_valid_categorical_partition_object(partition_object):
if internal_weight_holdout > 0:
raise ValueError(
"Internal weight holdout cannot be used for discrete data."
)
# Data are expected to be discrete, use value_counts
observed_weights = self.get_column_value_counts(
column
) / self.get_column_nonnull_count(column)
expected_weights = pd.Series(
partition_object["weights"],
index=partition_object["values"],
name="expected",
)
# Sort not available before pandas 0.23.0
# test_df = pd.concat([expected_weights, observed_weights], axis=1, sort=True)
test_df = pd.concat([expected_weights, observed_weights], axis=1)
na_counts = test_df.isnull().sum()
# Handle NaN: if we expected something that's not there, it's just not there.
pk = test_df["count"].fillna(0)
# Handle NaN: if something's there that was not expected,
# substitute the relevant value for tail_weight_holdout
if na_counts["expected"] > 0:
# Scale existing expected values
test_df["expected"] *= 1 - tail_weight_holdout
# Fill NAs with holdout.
qk = test_df["expected"].fillna(
tail_weight_holdout / na_counts["expected"]
)
else:
qk = test_df["expected"]
kl_divergence = stats.entropy(pk, qk)
if np.isinf(kl_divergence) or np.isnan(kl_divergence):
observed_value = None
else:
observed_value = kl_divergence
if threshold is None:
success = True
else:
success = kl_divergence <= threshold
return_obj = {
"success": success,
"result": {
"observed_value": observed_value,
"details": {
"observed_partition": {
"values": test_df.index.tolist(),
"weights": pk.tolist(),
},
"expected_partition": {
"values": test_df.index.tolist(),
"weights": qk.tolist(),
},
},
},
}
else:
# Data are expected to be continuous; discretize first
if bucketize_data is False:
raise ValueError(
"KL Divergence cannot be computed with a continuous partition object and the bucketize_data "
"parameter set to false."
)
            # Build the histogram using the expected bins so the observed weights line up bin-for-bin with the expected partition
hist = np.array(
self.get_column_hist(column, tuple(partition_object["bins"]))
)
# np.histogram(column, partition_object['bins'], density=False)
bin_edges = partition_object["bins"]
# Add in the frequencies observed above or below the provided partition
# below_partition = len(np.where(column < partition_object['bins'][0])[0])
# above_partition = len(np.where(column > partition_object['bins'][-1])[0])
below_partition = self.get_column_count_in_range(
column, max_val=partition_object["bins"][0]
)
above_partition = self.get_column_count_in_range(
column, min_val=partition_object["bins"][-1], strict_min=True
)
# Observed Weights is just the histogram values divided by the total number of observations
observed_weights = np.array(hist) / self.get_column_nonnull_count(column)
# Adjust expected_weights to account for tail_weight and internal_weight
if "tail_weights" in partition_object:
partition_tail_weight_holdout = np.sum(partition_object["tail_weights"])
else:
partition_tail_weight_holdout = 0
expected_weights = np.array(partition_object["weights"]) * (
1 - tail_weight_holdout - internal_weight_holdout
)
# Assign internal weight holdout values if applicable
if internal_weight_holdout > 0:
zero_count = len(expected_weights) - np.count_nonzero(expected_weights)
if zero_count > 0:
for index, value in enumerate(expected_weights):
if value == 0:
expected_weights[index] = (
internal_weight_holdout / zero_count
)
# Assign tail weight holdout if applicable
# We need to check cases to only add tail weight holdout if it makes sense based on the provided partition.
            if (partition_object["bins"][0] == -np.inf) and (
                partition_object["bins"][-1] == np.inf
            ):
if tail_weight_holdout > 0:
raise ValueError(
"tail_weight_holdout cannot be used for partitions with infinite endpoints."
)
if "tail_weights" in partition_object:
raise ValueError(
"There can be no tail weights for partitions with one or both endpoints at infinity"
)
# Remove -inf and inf
expected_bins = partition_object["bins"][1:-1]
comb_expected_weights = expected_weights
# Set aside tail weights
expected_tail_weights = np.concatenate(
([expected_weights[0]], [expected_weights[-1]])
)
# Remove tail weights
expected_weights = expected_weights[1:-1]
comb_observed_weights = observed_weights
# Set aside tail weights
observed_tail_weights = np.concatenate(
([observed_weights[0]], [observed_weights[-1]])
)
# Remove tail weights
observed_weights = observed_weights[1:-1]
elif partition_object["bins"][0] == -np.inf:
if "tail_weights" in partition_object:
raise ValueError(
"There can be no tail weights for partitions with one or both endpoints at infinity"
)
# Remove -inf
expected_bins = partition_object["bins"][1:]
comb_expected_weights = np.concatenate(
(expected_weights, [tail_weight_holdout])
)
# Set aside left tail weight and holdout
expected_tail_weights = np.concatenate(
([expected_weights[0]], [tail_weight_holdout])
)
# Remove left tail weight from main expected_weights
expected_weights = expected_weights[1:]
comb_observed_weights = np.concatenate(
(
observed_weights,
[above_partition / self.get_column_nonnull_count(column)],
)
)
# Set aside left tail weight and above partition weight
observed_tail_weights = np.concatenate(
(
[observed_weights[0]],
[above_partition / self.get_column_nonnull_count(column)],
)
)
# Remove left tail weight from main observed_weights
observed_weights = observed_weights[1:]
elif partition_object["bins"][-1] == np.inf:
if "tail_weights" in partition_object:
raise ValueError(
"There can be no tail weights for partitions with one or both endpoints at infinity"
)
# Remove inf
expected_bins = partition_object["bins"][:-1]
comb_expected_weights = np.concatenate(
([tail_weight_holdout], expected_weights)
)
# Set aside right tail weight and holdout
expected_tail_weights = np.concatenate(
([tail_weight_holdout], [expected_weights[-1]])
)
# Remove right tail weight from main expected_weights
expected_weights = expected_weights[:-1]
comb_observed_weights = np.concatenate(
(
[below_partition / self.get_column_nonnull_count(column)],
observed_weights,
)
)
# Set aside right tail weight and below partition weight
observed_tail_weights = np.concatenate(
(
[below_partition / self.get_column_nonnull_count(column)],
[observed_weights[-1]],
)
)
# Remove right tail weight from main observed_weights
observed_weights = observed_weights[:-1]
else:
# No need to remove -inf or inf
expected_bins = partition_object["bins"]
if "tail_weights" in partition_object:
tail_weights = partition_object["tail_weights"]
# Tack on tail weights
comb_expected_weights = np.concatenate(
([tail_weights[0]], expected_weights, [tail_weights[1]])
)
# Tail weights are just tail_weights
expected_tail_weights = np.array(tail_weights)
else:
comb_expected_weights = np.concatenate(
(
[tail_weight_holdout / 2],
expected_weights,
[tail_weight_holdout / 2],
)
)
# Tail weights are just tail_weight holdout divided equally to both tails
expected_tail_weights = np.concatenate(
([tail_weight_holdout / 2], [tail_weight_holdout / 2])
)
comb_observed_weights = np.concatenate(
(
[below_partition / self.get_column_nonnull_count(column)],
observed_weights,
[above_partition / self.get_column_nonnull_count(column)],
)
)
# Tail weights are just the counts on either side of the partition
observed_tail_weights = np.concatenate(
([below_partition], [above_partition])
) / self.get_column_nonnull_count(column)
# Main expected_weights and main observed weights had no tail_weights, so nothing needs to be removed.
# TODO: VERIFY THAT THIS STILL WORKS BASED ON CHANGE TO HIST
# comb_expected_weights = np.array(comb_expected_weights).astype(float)
# comb_observed_weights = np.array(comb_observed_weights).astype(float)
kl_divergence = stats.entropy(comb_observed_weights, comb_expected_weights)
if np.isinf(kl_divergence) or np.isnan(kl_divergence):
observed_value = None
else:
observed_value = kl_divergence
if threshold is None:
success = True
else:
success = kl_divergence <= threshold
return_obj = {
"success": success,
"result": {
"observed_value": observed_value,
"details": {
"observed_partition": {
# return expected_bins, since we used those bins to compute the observed_weights
"bins": expected_bins,
"weights": observed_weights.tolist(),
"tail_weights": observed_tail_weights.tolist(),
},
"expected_partition": {
"bins": expected_bins,
"weights": expected_weights.tolist(),
"tail_weights": expected_tail_weights.tolist(),
},
},
},
}
return return_obj
@MetaDataset.column_aggregate_expectation
def expect_column_pair_cramers_phi_value_to_be_less_than(
self,
column_A,
column_B,
bins_A=None,
bins_B=None,
n_bins_A=None,
n_bins_B=None,
threshold=0.1,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect the values in column_A to be independent of those in column_B.
Args:
column_A (str): The first column name
column_B (str): The second column name
threshold (float): Maximum allowed value of cramers V for expectation to pass.
Keyword Args:
bins_A (list of float): Bins for column_A.
bins_B (list of float): Bins for column_B.
n_bins_A (int): Number of bins for column_A. Ignored if bins_A is not None.
n_bins_B (int): Number of bins for column_B. Ignored if bins_B is not None.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
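        Example (an illustrative sketch only; ``my_df``, ``col_A`` and ``col_B`` are placeholders):
            result = my_df.expect_column_pair_cramers_phi_value_to_be_less_than(
                "col_A", "col_B", threshold=0.1)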
"""
crosstab = self.get_crosstab(
column_A, column_B, bins_A, bins_B, n_bins_A, n_bins_B
)
chi2_result = stats.chi2_contingency(crosstab)
# See e.g. https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V
cramers_V = max(
min(
np.sqrt(
chi2_result[0] / self.get_row_count() / (min(crosstab.shape) - 1)
),
1,
),
0,
)
return_obj = {
"success": cramers_V <= threshold,
"result": {
"observed_value": cramers_V,
"unexpected_list": crosstab,
"details": {"crosstab": crosstab},
},
}
return return_obj
###
#
# Column pairs
#
###
def expect_column_pair_values_to_be_equal(
self,
column_A,
column_B,
ignore_row_if="both_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect the values in column A to be the same as column B.
Args:
column_A (str): The first column name
column_B (str): The second column name
Keyword Args:
ignore_row_if (str): "both_values_are_missing", "either_value_is_missing", "neither"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_column_pair_values_A_to_be_greater_than_B(
self,
column_A,
column_B,
or_equal=None,
parse_strings_as_datetimes=False,
allow_cross_type_comparisons=None,
ignore_row_if="both_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect values in column A to be greater than column B.
Args:
column_A (str): The first column name
column_B (str): The second column name
or_equal (boolean or None): If True, then values can be equal, not strictly greater
Keyword Args:
allow_cross_type_comparisons (boolean or None) : If True, allow comparisons between types (e.g. integer and\
string). Otherwise, attempting such comparisons will raise an exception.
            ignore_row_if (str): "both_values_are_missing", "either_value_is_missing", "neither"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_column_pair_values_to_be_in_set(
self,
column_A,
column_B,
value_pairs_set,
ignore_row_if="both_values_are_missing",
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect paired values from columns A and B to belong to a set of valid pairs.
Args:
column_A (str): The first column name
column_B (str): The second column name
value_pairs_set (list of tuples): All the valid pairs to be matched
Keyword Args:
ignore_row_if (str): "both_values_are_missing", "either_value_is_missing", "never"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
###
#
# Multicolumn
#
###
def expect_multicolumn_values_to_be_unique(
self,
column_list,
ignore_row_if="all_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
NOTE: This method is deprecated. Please use expect_select_column_values_to_be_unique_within_record instead
Expect the values for each record to be unique across the columns listed.
Note that records can be duplicated.
For example::
A B C
1 1 2 Fail
1 2 3 Pass
8 2 7 Pass
1 2 3 Pass
4 4 4 Fail
Args:
column_list (tuple or list): The column names to evaluate
Keyword Args:
ignore_row_if (str): "all_values_are_missing", "any_value_is_missing", "never"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_select_column_values_to_be_unique_within_record(
self,
column_list,
ignore_row_if="all_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect the values for each record to be unique across the columns listed.
Note that records can be duplicated.
For example::
A B C
1 1 2 Fail
1 2 3 Pass
8 2 7 Pass
1 2 3 Pass
4 4 4 Fail
Args:
column_list (tuple or list): The column names to evaluate
Keyword Args:
ignore_row_if (str): "all_values_are_missing", "any_value_is_missing", "never"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_compound_columns_to_be_unique(
self,
column_list,
ignore_row_if="all_values_are_missing",
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect that the columns are unique together, e.g. a multi-column primary key
Note that all instances of any duplicates are considered failed
For example::
A B C
1 1 2 Fail
1 2 3 Pass
1 1 2 Fail
2 2 2 Pass
3 2 3 Pass
Args:
column_list (tuple or list): The column names to evaluate
Keyword Args:
ignore_row_if (str): "all_values_are_missing", "any_value_is_missing", "never"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError
def expect_multicolumn_sum_to_equal(
self,
column_list,
sum_total,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
""" Multi-Column Map Expectation
        Expects that, for every row, the sum of the values in the listed columns is equal to a specific value
Args:
column_list (List[str]): \
Set of columns to be checked
sum_total (int): \
expected sum of columns
"""
raise NotImplementedError
@staticmethod
def _parse_value_set(value_set):
parsed_value_set = [
parse(value) if isinstance(value, str) else value for value in value_set
]
return parsed_value_set
def attempt_allowing_relative_error(self) -> Union[bool, float]:
"""
Subclasses can override this method if the respective data source (e.g., Redshift) supports "approximate" mode.
In certain cases (e.g., for SparkDFDataset), a fraction between 0 and 1 (i.e., not only a boolean) is allowed.
"""
return False
| apache-2.0 |
DarkEnergyScienceCollaboration/SLCosmo | python/desc/slcosmo/TDC2.py | 1 | 6769 | import numpy as np
import scipy.misc
c = 3e5 #km/s
class TDC2ensemble(object):
"""
In TDC2, we expect time delays to be inferred by the Good Teams and
submitted as ensembles of posterior samples, in plain text tables,
one time delay per column (AB, AC, AD) for quads, and just (AB) for
doubles. The headers of these files will contain the same Fermat
potential information that was provided in the data file, ie an
'observed' FP difference and its uncertainty for each image pair,
plus an overall 'Q' factor that enables the conversion between time
delay, FP, and time delay distance.
This class is a data structure, for storing all the information
provided in a TDC2 inferred time delay sample file.
It could be nice if we moved to using pandas dataframes, so that we
can refer to the time delays as eg dt['AB'], and the corresponding
FP differences as DeltaFP['AB'] +/- DeltaFP_err['AB'].
Use cases:
1. Get samples and header information from a file
2. Enable SLCosmo factory method to create mock TDC2 ensembles from scratch
3. Write mock samples and header information to a file
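    A minimal sketch of use case 1 (the file name below is a placeholder):
        ensemble = TDC2ensemble.read_in_from('lens0_time_delays.txt')
        logL = ensemble.log_likelihood(70.0)  # log likelihood at H0 = 70 km/s/Mpc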
"""
def __init__(self):
self.source = None
self.Nsamples = None
self.dt_obs = []
return
@staticmethod
def read_in_from(tdc2samplefile):
"""
Read in both the posterior sample time delays and the Fermat potential header information, and store it for re-use.
Parameters:
-----------
tdc2samplefile : string
Name of the file to read from.
Returns:
--------
TDC2ensemble object
A TDC2ensemble object filled with the read in data.
Notes:
------
The samples are stored in a 2D numpy array with one row for each
lens, and one column for each time delay. Doubles will only have
one time delay ('AB'), while quads will have at least three
('AB', 'AC', 'AD').
Possible failure modes:
1. File does not exist
2. File has no samples in it
3. File has no header in it
4. Samples are not 2D numpy array
5. Array has wrong number of columns (time delays - should be 1 or 3, and equal to Ndt)
"""
my_object = TDC2ensemble()
my_object.source = tdc2samplefile
my_object._read_header()
my_object.dt_obs = np.loadtxt(my_object.source)
if len(my_object.dt_obs.shape) == 1:
my_object.Nim = 2
else:
my_object.Nim = 4
my_object.Nsamples = len(my_object.dt_obs)
return my_object
def _read_header(self):
self.DeltaFP_obs = []
self.DeltaFP_err = []
with open(self.source) as input_:
for line in input_:
if line.startswith('# Q'):
self.Q = float(line.strip().split(':')[1])
if line.startswith('# Delta'):
key, value = line.strip()[1:].split(':')
if key.find('err') != -1:
self.DeltaFP_err.append(float(value))
else:
self.DeltaFP_obs.append(float(value))
self.DeltaFP_obs = np.array(self.DeltaFP_obs)
self.DeltaFP_err = np.array(self.DeltaFP_err)
def write_out_to(self, tdc2samplefile):
"""
Write out both the posterior sample time delays and the Fermat
potential header information in a plain text file.
Parameters:
-----------
tdc2samplefile : string
The name of the file to be written to.
Notes:
------
Possible failure modes:
1. Samples array has no samples in it, even if Nsamples is not None
2. File is not actually written
"""
if self.Nsamples is None:
print("No samples to write out, skipping.")
else:
# First write out the header, over-writing the file:
self.form_header()
np.savetxt(tdc2samplefile, self.dt_obs,
header=self.header, comments='# ')
return
def log_likelihood(self, H0, fast=True):
"""
Compute the likelihood of the proposed Hubble constant H0 given
the Fermat potential difference data, marginalizing over the
time delay PDF provided (approximately) in the samples.
Parameters:
-----------
H0 : float
The Hubble constant under evaluation.
fast : Boolean, optional
Just in case you want to do the calculation
without vectorisation.
Returns:
--------
logL : float
The value of the log likelihood.
See Also:
---------
SLCosmo.compute_the_joint_log_likelihood
Notes:
------
        Avoid `fast=False`: the non-vectorised path loops over every posterior sample in
        Python and is much slower; it is only useful as a cross-check of the vectorised calculation.
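        Schematically (this is a reading of the code below, not extra authorial
        documentation), the marginalised likelihood is
            L(H0) = (1/Ns) * sum_k Normal(DeltaFP_obs | c * dt_k * H0 / Q, DeltaFP_err)
        i.e. a Monte Carlo average of Gaussian likelihoods over the posterior time
        delay samples dt_k (and over image pairs for quads), evaluated in log space
        with logsumexp for numerical stability.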
"""
if fast:
x = self.DeltaFP_obs - (c * self.dt_obs * H0 / self.Q)
chisq = (x/self.DeltaFP_err)**2.0
logL = -0.5 * chisq \
- np.log(np.sqrt(2*np.pi) * self.DeltaFP_err)
else:
logL = np.array([])
Ns = 0
for i in range(self.Nsamples):
for j in range(self.Nim - 1):
Ns += 1
x = self.DeltaFP_obs[j] - \
(c * self.dt_obs[i,j] * H0 / self.Q)
chisq = (x/self.DeltaFP_err[j])**2.0
logL_el = -0.5 * chisq \
- np.log(np.sqrt(2*np.pi) * self.DeltaFP_err[j])
logL = np.append(logL,logL_el)
return scipy.misc.logsumexp(logL) - np.log(len(np.ravel(logL)))
def form_header(self):
self.header = \
"Time Delay Challenge 2 Posterior Sample Time Delays\n\
\n\
Notes:\n\
* Time delays should be given in days. Positive dt_AB means that light\n\
curve A leads (not lags) light curve B.\n\
* Q is the H0-free time delay distance, a function of zd, zs and\n\
cosmology. Q has units of km / s: D_dt = Q / H0\n\
* Fermat potential differences DeltaFP are given in units of\n\
day km / s / Mpc, such that the predicted time delay is\n\
dt = (Q / (c * H0)) * DeltaFP, in days. c = 3.00e5 km/s\n\
\n\
Q: "+str(self.Q)+"\n"
names = ['AB', 'AC', 'AD']
for k in range(self.Nim - 1):
self.header = self.header + \
"DeltaFP_"+names[k]+": "+str(self.DeltaFP_obs[k])+"\n" + \
"DeltaFP_"+names[k]+"_err: "+str(self.DeltaFP_err[k])+"\n"
self.header = self.header + "\n"
for k in range(self.Nim - 1):
self.header = self.header + \
" dt_"+names[k]
return
| bsd-3-clause |
Garrett-R/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
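# Illustrative aside (not part of the original example): for a single input value,
# PolynomialFeatures builds exactly the Vandermonde-style pseudo-feature row described
# above, e.g. PolynomialFeatures(3).fit_transform(np.array([[2.0]])) -> [[1., 2., 4., 8.]]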
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
niketanpansare/incubator-systemml | src/main/python/systemml/converters.py | 5 | 14040 | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
__all__ = [
'getNumCols',
'convertToMatrixBlock',
'convert_caffemodel',
'convert_lmdb_to_jpeg',
'convertToNumPyArr',
'convertToPandasDF',
'SUPPORTED_TYPES',
'convertToLabeledDF',
'convertImageToNumPyArr',
'getDatasetMean']
import numpy as np
import pandas as pd
import os
import math
from pyspark.context import SparkContext
from scipy.sparse import coo_matrix, spmatrix, csr_matrix
from .classloader import *
SUPPORTED_TYPES = (np.ndarray, pd.DataFrame, spmatrix)
DATASET_MEAN = {'VGG_ILSVRC_19_2014': [103.939, 116.779, 123.68]}
def getNumCols(numPyArr):
if numPyArr.ndim == 1:
return 1
else:
return numPyArr.shape[1]
def get_pretty_str(key, value):
return '\t"' + key + '": ' + str(value) + ',\n'
def save_tensor_csv(tensor, file_path, shouldTranspose):
    w = tensor.reshape(tensor.shape[0], -1)
if shouldTranspose:
w = w.T
np.savetxt(file_path, w, delimiter=',')
with open(file_path + '.mtd', 'w') as file:
file.write('{\n\t"data_type": "matrix",\n\t"value_type": "double",\n')
file.write(get_pretty_str('rows', w.shape[0]))
file.write(get_pretty_str('cols', w.shape[1]))
file.write(get_pretty_str('nnz', np.count_nonzero(w)))
file.write(
'\t"format": "csv",\n\t"description": {\n\t\t"author": "SystemML"\n\t}\n}\n')
def convert_caffemodel(sc, deploy_file, caffemodel_file,
output_dir, format="binary", is_caffe_installed=False):
"""
Saves the weights and bias in the caffemodel file to output_dir in the specified format.
This method does not requires caffe to be installed.
Parameters
----------
sc: SparkContext
SparkContext
deploy_file: string
Path to the input network file
caffemodel_file: string
Path to the input caffemodel file
output_dir: string
Path to the output directory
format: string
Format of the weights and bias (can be binary, csv or text)
is_caffe_installed: bool
True if caffe is installed
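    Example (a sketch; the paths below are placeholders, not files shipped with SystemML):
        convert_caffemodel(sc, 'deploy.prototxt', 'model.caffemodel',
                           '/tmp/caffe_weights', format='binary')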
"""
if is_caffe_installed:
if format != 'csv':
raise ValueError(
'The format ' +
str(format) +
' is not supported when caffe is installed. Hint: Please specify format=csv')
import caffe
net = caffe.Net(deploy_file, caffemodel_file, caffe.TEST)
for layerName in net.params.keys():
num_parameters = len(net.params[layerName])
if num_parameters == 0:
continue
elif num_parameters == 2:
# Weights and Biases
layerType = net.layers[list(
net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(
net.params[layerName][0].data,
os.path.join(
output_dir,
layerName +
'_weight.mtx'),
shouldTranspose)
save_tensor_csv(
net.params[layerName][1].data,
os.path.join(
output_dir,
layerName +
'_bias.mtx'),
shouldTranspose)
elif num_parameters == 1:
# Only Weight
layerType = net.layers[list(
net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(
net.params[layerName][0].data,
os.path.join(
output_dir,
layerName +
'_weight.mtx'),
shouldTranspose)
else:
raise ValueError(
'Unsupported number of parameters:' +
str(num_parameters))
else:
createJavaObject(sc, 'dummy')
utilObj = sc._jvm.org.apache.sysml.api.dl.Utils()
utilObj.saveCaffeModelFile(
sc._jsc,
deploy_file,
caffemodel_file,
output_dir,
format)
def convert_lmdb_to_jpeg(lmdb_img_file, output_dir):
"""
Saves the images in the lmdb file as jpeg in the output_dir. This method requires caffe to be installed along with lmdb and cv2 package.
To install cv2 package, do `pip install opencv-python`.
Parameters
----------
lmdb_img_file: string
Path to the input lmdb file
output_dir: string
Output directory for images (local filesystem)
"""
import lmdb
import caffe
import cv2
    lmdb_cursor = lmdb.open(lmdb_img_file, readonly=True).begin().cursor()
datum = caffe.proto.caffe_pb2.Datum()
i = 1
for _, value in lmdb_cursor:
datum.ParseFromString(value)
data = caffe.io.datum_to_array(datum)
output_file_path = os.path.join(output_dir, 'file_' + str(i) + '.jpg')
image = np.transpose(data, (1, 2, 0)) # CxHxW to HxWxC in cv2
cv2.imwrite(output_file_path, image)
i = i + 1
def convertToLabeledDF(sparkSession, X, y=None):
from pyspark.ml.feature import VectorAssembler
if y is not None:
pd1 = pd.DataFrame(X)
pd2 = pd.DataFrame(y, columns=['label'])
pdf = pd.concat([pd1, pd2], axis=1)
inputColumns = ['C' + str(i) for i in pd1.columns]
outputColumns = inputColumns + ['label']
else:
pdf = pd.DataFrame(X)
inputColumns = ['C' + str(i) for i in pdf.columns]
outputColumns = inputColumns
assembler = VectorAssembler(inputCols=inputColumns, outputCol='features')
out = assembler.transform(sparkSession.createDataFrame(pdf, outputColumns))
if y is not None:
return out.select('features', 'label')
else:
return out.select('features')
def _convertSPMatrixToMB(sc, src):
src = coo_matrix(src, dtype=np.float64)
numRows = src.shape[0]
numCols = src.shape[1]
data = src.data
row = src.row.astype(np.int32)
col = src.col.astype(np.int32)
nnz = len(src.col)
buf1 = bytearray(data.tostring())
buf2 = bytearray(row.tostring())
buf3 = bytearray(col.tostring())
createJavaObject(sc, 'dummy')
return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertSciPyCOOToMB(
buf1, buf2, buf3, numRows, numCols, nnz)
def _convertDenseMatrixToMB(sc, src):
numCols = getNumCols(src)
numRows = src.shape[0]
src = np.asarray(src, dtype=np.float64) if not isinstance(src, np.ndarray) else src
# data_type: 0: int, 1: float and 2: double
if src.dtype is np.dtype(np.int32):
arr = src.ravel().astype(np.int32)
dataType = 0
elif src.dtype is np.dtype(np.float32):
arr = src.ravel().astype(np.float32)
dataType = 1
else:
arr = src.ravel().astype(np.float64)
dataType = 2
buf = bytearray(arr.tostring())
createJavaObject(sc, 'dummy')
return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertPy4JArrayToMB(
buf, numRows, numCols, dataType)
def _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen):
rowIndex = int(i / numRowsPerBlock)
tmp = src[i:min(i + numRowsPerBlock, rlen), ]
mb = _convertSPMatrixToMB(
sc,
tmp) if isinstance(
src,
spmatrix) else _convertDenseMatrixToMB(
sc,
tmp)
sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.copyRowBlocks(
mb, rowIndex, ret, numRowsPerBlock, rlen, clen)
return i
def convertToMatrixBlock(sc, src, maxSizeBlockInMB=128):
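    """
    Converts a 2-dimensional numpy array or scipy sparse matrix into a SystemML
    MatrixBlock. If the worst-case size of the input exceeds maxSizeBlockInMB,
    the data is transferred in row blocks to avoid pushing one very large buffer
    through Py4J at once.
    Parameters
    ----------
    sc: SparkContext
        SparkContext
    src: numpy.ndarray or scipy sparse matrix
        The matrix to convert
    maxSizeBlockInMB: int
        Maximum worst-case size (in MB) transferred as a single block
    """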
if not isinstance(sc, SparkContext):
raise TypeError('sc needs to be of type SparkContext')
if isinstance(src, spmatrix):
isSparse = True
else:
isSparse = False
src = np.asarray(src, dtype=np.float64) if not isinstance(src, np.ndarray) else src
if len(src.shape) != 2:
src_type = str(type(src).__name__)
raise TypeError('Expected 2-dimensional ' +
src_type +
', instead passed ' +
str(len(src.shape)) +
'-dimensional ' +
src_type)
worstCaseSizeInMB = (8*(src.getnnz()*3 if isSparse else src.shape[0]*src.shape[1])) / 1000000
# Ignoring sparsity for computing numRowsPerBlock for now
numRowsPerBlock = int(
math.ceil((maxSizeBlockInMB * 1000000) / (src.shape[1] * 8)))
if worstCaseSizeInMB <= maxSizeBlockInMB:
return _convertSPMatrixToMB(
sc, src) if isSparse else _convertDenseMatrixToMB(sc, src)
else:
# Since coo_matrix does not have range indexing
src = csr_matrix(src) if isSparse else src
rlen = int(src.shape[0])
clen = int(src.shape[1])
ret = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.allocateDenseOrSparse(
rlen, clen, isSparse)
[_copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen)
for i in range(0, src.shape[0], numRowsPerBlock)]
sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.postProcessAfterCopying(
ret)
return ret
def convertToNumPyArr(sc, mb):
if isinstance(sc, SparkContext):
numRows = mb.getNumRows()
numCols = mb.getNumColumns()
createJavaObject(sc, 'dummy')
buf = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertMBtoPy4JDenseArr(
mb)
return np.frombuffer(buf, count=numRows * numCols,
dtype=np.float64).reshape((numRows, numCols))
else:
# TODO: We can generalize this by creating py4j gateway ourselves
raise TypeError('sc needs to be of type SparkContext')
# Returns the mean of a model if defined otherwise None
def getDatasetMean(dataset_name):
"""
Parameters
----------
    dataset_name: Name of the dataset used to train the model. This is an artificial name based on the training dataset.
Returns
-------
mean: Mean value of model if its defined in the list DATASET_MEAN else None.
"""
try:
mean = DATASET_MEAN[dataset_name.upper()]
except BaseException:
mean = None
return mean
# Example usage: convertImageToNumPyArr(im, img_shape=(3, 224, 224), add_rotated_images=True, add_mirrored_images=True)
# The above call returns a numpy array of shape (6, 50176) in NCHW format
def convertImageToNumPyArr(im, img_shape=None, add_rotated_images=False, add_mirrored_images=False,
color_mode='RGB', mean=None):
# Input Parameters
    # color_mode: VGG models expect image data in BGR format instead of the RGB format
    # used by most other models; the color_mode parameter selects which ordering is produced.
    # mean: per-channel mean values that are subtracted from the input data. By default
    # mean is None, so no mean is subtracted.
if img_shape is not None:
num_channels = img_shape[0]
size = (img_shape[1], img_shape[2])
else:
num_channels = 1 if im.mode == 'L' else 3
size = None
if num_channels != 1 and num_channels != 3:
raise ValueError('Expected the number of channels to be either 1 or 3')
from PIL import Image
if size is not None:
im = im.resize(size, Image.LANCZOS)
expected_mode = 'L' if num_channels == 1 else 'RGB'
    if expected_mode != im.mode:
im = im.convert(expected_mode)
def _im2NumPy(im):
if expected_mode == 'L':
return np.asarray(im.getdata()).reshape((1, -1))
else:
im = (np.array(im).astype(np.float))
# (H,W,C) -> (C,H,W)
im = im.transpose(2, 0, 1)
            # RGB -> BGR (the channel axis is axis 0 after the transpose above)
            if color_mode == 'BGR':
                im = im[::-1, :, :]
            # Subtract Mean, per channel
            if mean is not None:
                for c in range(3):
                    im[c, :, :] = im[c, :, :] - mean[c]
# (C,H,W) --> (1, C*H*W)
return im.reshape((1, -1))
ret = _im2NumPy(im)
if add_rotated_images:
ret = np.vstack(
(ret, _im2NumPy(
im.rotate(90)), _im2NumPy(
im.rotate(180)), _im2NumPy(
im.rotate(270))))
if add_mirrored_images:
ret = np.vstack(
(ret, _im2NumPy(
im.transpose(
Image.FLIP_LEFT_RIGHT)), _im2NumPy(
im.transpose(
Image.FLIP_TOP_BOTTOM))))
return ret
def convertToPandasDF(X):
if not isinstance(X, pd.DataFrame):
return pd.DataFrame(X, columns=['C' + str(i)
for i in range(getNumCols(X))])
return X
| apache-2.0 |
daStrauss/sparseConv | src/solver.py | 1 | 1971 | '''
Created on Dec 29, 2012
@author: dstrauss
Copyright 2013 David Strauss
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
method definition for doing CG solves with a pipelined matrix-vector multiplication
'''
import numpy as np
def cg(A,b,maxiter=30,tol=1e-6,pll=False):
''' run CG iterations in order to solve the equation Ax=b,
A ==> a function that implements "matrix vector" multiplication, must be Positive Semidefinite
b ==> right hand side in Ax=b
maxiter ==> maximum number of CG iterations
tol ==> exit tolerance, if ||Ax-b|| < tol the program exits
pll ==> boolean flag for doing plotting or not
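    example (a small dense symmetric positive definite system, for illustration only):
        A = np.diag(np.arange(1.0, 11.0))
        x, nit = cg(lambda v: A.dot(v), np.ones(10), maxiter=50, tol=1e-8)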
'''
x = np.zeros(b.size,dtype=b.dtype)
r=b-A(x)
p=r
rsold=np.dot(r.T,r)
rsn = list()
ix = 0
while ix < maxiter:
ix += 1
Ap = A(p);
alpha=rsold/np.dot(p.T,Ap);
x=x+alpha*p;
r=r-alpha*Ap
rsnew = np.dot(r.T,r)
rsn.append(np.sqrt(rsnew))
if np.sqrt(rsnew)<tol:
break
p = r+ (rsnew/rsold)*p;
rsold=rsnew;
if pll:
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(rsn)
plt.title('rsn')
plt.show()
return x,ix
def test():
import scipy.sparse
opr = -np.ones((3,20))
opr[1,:] = 2
M = scipy.sparse.spdiags(opr, [-1,0,1], 20,20);
b = np.zeros(20)
b[9] = 1
cg(lambda x: M*x,b)
| apache-2.0 |
ctozlm/Dato-Core | src/unity/python/graphlab/data_structures/gframe.py | 13 | 10841 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
from graphlab.data_structures.sframe import SFrame
from graphlab.data_structures.sframe import SArray
from graphlab.cython.context import debug_trace as cython_context
from graphlab.data_structures.sarray import SArray, _create_sequential_sarray
import copy
VERTEX_GFRAME = 0
EDGE_GFRAME = 1
class GFrame(SFrame):
"""
GFrame is similar to SFrame but is associated with an SGraph.
- GFrame can be obtained from either the `vertices` or `edges`
      attribute of any SGraph:
>>> import graphlab
>>> g = graphlab.load_sgraph(...)
>>> vertices_gf = g.vertices
>>> edges_gf = g.edges
- GFrame has the same API as SFrame:
>>> sa = vertices_gf['pagerank']
>>> # column lambda transform
>>> vertices_gf['pagerank'] = vertices_gf['pagerank'].apply(lambda x: 0.15 + 0.85 * x)
>>> # frame lambda transform
>>> vertices_gf['score'] = vertices_gf.apply(lambda x: 0.2 * x['triangle_count'] + 0.8 * x['pagerank'])
>>> del vertices_gf['pagerank']
- GFrame can be converted to SFrame:
>>> # extract an SFrame
>>> sf = vertices_gf.__to_sframe__()
"""
def __init__(self, graph, gframe_type):
self.__type__ = gframe_type
self.__graph__ = graph
self.__sframe_cache__ = None
self.__is_dirty__ = False
def __to_sframe__(self):
return copy.copy(self._get_cache())
#/**************************************************************************/
#/* */
#/* Modifiers */
#/* */
#/**************************************************************************/
def add_column(self, data, name=""):
"""
Adds the specified column to this SFrame. The number of elements in
the data given must match every other column of the SFrame.
Parameters
----------
data : SArray
The 'column' of data.
name : string
The name of the column. If no name is given, a default name is chosen.
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(name, str):
raise TypeError("Invalid column name: must be str")
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.add_vertex_field(data.__proxy__, name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.add_edge_field(data.__proxy__, name)
self.__graph__.__proxy__ = graph_proxy
def add_columns(self, datalist, namelist):
"""
Adds columns to the SFrame. The number of elements in all columns must
match every other column of the SFrame.
Parameters
----------
datalist : list of SArray
A list of columns
namelist : list of string
A list of column names. All names must be specified.
"""
if not hasattr(datalist, '__iter__'):
raise TypeError("datalist must be an iterable")
if not hasattr(namelist, '__iter__'):
raise TypeError("namelist must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in namelist]):
raise TypeError("Invalid column name in list: must all be str")
for (data, name) in zip(datalist, namelist):
self.add_column(data, name)
def remove_column(self, name):
"""
Removes the column with the given name from the SFrame.
Parameters
----------
name : string
The name of the column to remove.
"""
if name not in self.column_names():
raise KeyError('Cannot find column %s' % name)
self.__is_dirty__ = True
try:
with cython_context():
if self._is_vertex_frame():
assert name != '__id', 'Cannot remove \"__id\" column'
graph_proxy = self.__graph__.__proxy__.delete_vertex_field(name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
assert name != '__src_id', 'Cannot remove \"__src_id\" column'
assert name != '__dst_id', 'Cannot remove \"__dst_id\" column'
graph_proxy = self.__graph__.__proxy__.delete_edge_field(name)
self.__graph__.__proxy__ = graph_proxy
except:
self.__is_dirty__ = False
raise
def swap_columns(self, column_1, column_2):
"""
Swaps the columns with the given names.
Parameters
----------
column_1 : string
Name of column to swap
column_2 : string
Name of other column to swap
"""
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.swap_vertex_fields(column_1, column_2)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.swap_edge_fields(column_1, column_2)
self.__graph__.__proxy__ = graph_proxy
def rename(self, names):
"""
Rename the columns using the 'names' dict. This changes the names of
the columns given as the keys and replaces them with the names given as
the values.
Parameters
----------
names : dict[string, string]
Dictionary of [old_name, new_name]
"""
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.rename_vertex_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.rename_edge_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
def add_row_number(self, column_name='id', start=0):
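        """
        Add a column of sequential row numbers to this frame, starting from
        `start`, and return the frame itself.
        Parameters
        ----------
        column_name : string, optional
            Name of the new column; defaults to 'id'.
        start : int, optional
            Value of the first row number; defaults to 0.
        """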
if type(column_name) is not str:
raise TypeError("Must give column_name as str")
if column_name in self.column_names():
raise RuntimeError("Column name %s already exists" % str(column_name))
if type(start) is not int:
raise TypeError("Must give start as int")
the_col = _create_sequential_sarray(self.num_rows(), start)
self[column_name] = the_col
return self
def __setitem__(self, key, value):
"""
A wrapper around add_column(s). Key can be either a list or a str. If
value is an SArray, it is added to the SFrame as a column. If it is a
constant value (int, str, or float), then a column is created where
every entry is equal to the constant value. Existing columns can also
be replaced using this wrapper.
"""
if (key in ['__id', '__src_id', '__dst_id']):
raise KeyError('Cannot modify column %s. Changing __id column will\
change the graph structure' % key)
else:
self.__is_dirty__ = True
super(GFrame, self).__setitem__(key, value)
#/**************************************************************************/
#/* */
#/* Read-only Accessor */
#/* */
#/**************************************************************************/
def num_rows(self):
"""
Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.summary()['num_vertices']
elif self._is_edge_frame():
return self.__graph__.summary()['num_edges']
def num_cols(self):
"""
Returns the number of columns.
Returns
-------
out : int
Number of columns in the SFrame.
"""
return len(self.column_names())
def column_names(self):
"""
Returns the column names.
Returns
-------
out : list[string]
Column names of the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.__proxy__.get_vertex_fields()
elif self._is_edge_frame():
return self.__graph__.__proxy__.get_edge_fields()
def column_types(self):
"""
Returns the column types.
Returns
-------
out : list[type]
Column types of the SFrame.
"""
if self.__type__ == VERTEX_GFRAME:
return self.__graph__.__proxy__.get_vertex_field_types()
elif self.__type__ == EDGE_GFRAME:
return self.__graph__.__proxy__.get_edge_field_types()
#/**************************************************************************/
#/* */
#/* Internal Private Methods */
#/* */
#/**************************************************************************/
def _get_cache(self):
if self.__sframe_cache__ is None or self.__is_dirty__:
if self._is_vertex_frame():
self.__sframe_cache__ = self.__graph__.get_vertices()
elif self._is_edge_frame():
self.__sframe_cache__ = self.__graph__.get_edges()
else:
raise TypeError
self.__is_dirty__ = False
return self.__sframe_cache__
def _is_vertex_frame(self):
return self.__type__ == VERTEX_GFRAME
def _is_edge_frame(self):
return self.__type__ == EDGE_GFRAME
@property
def __proxy__(self):
return self._get_cache().__proxy__
| agpl-3.0 |
cactusbin/nyt | matplotlib/examples/axes_grid/demo_colorbar_with_inset_locator.py | 7 | 1052 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[6, 3])
axins1 = inset_axes(ax1,
                    width="50%", # width = 50% of parent_bbox width
                    height="5%", # height : 5%
loc=1)
im1=ax1.imshow([[1,2],[2, 3]])
plt.colorbar(im1, cax=axins1, orientation="horizontal", ticks=[1,2,3])
axins1.xaxis.set_ticks_position("bottom")
axins = inset_axes(ax2,
                   width="5%", # width = 5% of parent_bbox width
height="50%", # height : 50%
loc=3,
bbox_to_anchor=(1.05, 0., 1, 1),
bbox_transform=ax2.transAxes,
borderpad=0,
)
# Controlling the placement of the inset axes is basically same as that
# of the legend. you may want to play with the borderpad value and
# the bbox_to_anchor coordinate.
im=ax2.imshow([[1,2],[2, 3]])
plt.colorbar(im, cax=axins, ticks=[1,2,3])
plt.draw()
plt.show()
| unlicense |
zzcclp/spark | python/pyspark/sql/tests/test_dataframe.py | 9 | 42005 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pydoc
import shutil
import tempfile
import time
import unittest
from pyspark.sql import SparkSession, Row
from pyspark.sql.types import StringType, IntegerType, DoubleType, StructType, StructField, \
BooleanType, DateType, TimestampType, FloatType
from pyspark.sql.utils import AnalysisException, IllegalArgumentException
from pyspark.testing.sqlutils import ReusedSQLTestCase, SQLTestUtils, have_pyarrow, have_pandas, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
class DataFrameTests(ReusedSQLTestCase):
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_repartitionByRange_dataframe(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
df1 = self.spark.createDataFrame(
[(u'Bob', 27, 66.0), (u'Alice', 10, 10.0), (u'Bob', 10, 66.0)], schema)
df2 = self.spark.createDataFrame(
[(u'Alice', 10, 10.0), (u'Bob', 10, 66.0), (u'Bob', 27, 66.0)], schema)
# test repartitionByRange(numPartitions, *cols)
df3 = df1.repartitionByRange(2, "name", "age")
self.assertEqual(df3.rdd.getNumPartitions(), 2)
self.assertEqual(df3.rdd.first(), df2.rdd.first())
self.assertEqual(df3.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(numPartitions, *cols)
df4 = df1.repartitionByRange(3, "name", "age")
self.assertEqual(df4.rdd.getNumPartitions(), 3)
self.assertEqual(df4.rdd.first(), df2.rdd.first())
self.assertEqual(df4.rdd.take(3), df2.rdd.take(3))
# test repartitionByRange(*cols)
df5 = df1.repartitionByRange(5, "name", "age")
self.assertEqual(df5.rdd.first(), df2.rdd.first())
self.assertEqual(df5.rdd.take(3), df2.rdd.take(3))
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(TypeError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
# should fail when an unexpected type is received
with self.assertRaises(TypeError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
with self.assertRaisesRegex(
TypeError,
'value argument is required when to_replace is not a dictionary.'):
self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
# add tests for SPARK-23647 (test more types for hint)
def test_extended_hint_types(self):
df = self.spark.range(10e10).toDF("id")
such_a_nice_list = ["itworks1", "itworks2", "itworks3"]
hinted_df = df.hint("my awesome hint", 1.2345, "what", such_a_nice_list)
logical_plan = hinted_df._jdf.queryExecution().logical()
self.assertEqual(1, logical_plan.toString().count("1.2345"))
self.assertEqual(1, logical_plan.toString().count("what"))
self.assertEqual(3, logical_plan.toString().count("itworks"))
def test_sample(self):
self.assertRaisesRegex(
TypeError,
"should be a bool, float and number",
lambda: self.spark.range(1).sample())
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample("a"))
self.assertRaises(
TypeError,
lambda: self.spark.range(1).sample(seed="abc"))
self.assertRaises(
IllegalArgumentException,
lambda: self.spark.range(1).sample(-1.0))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegex(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegex(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
with self.sql_conf({"spark.sql.crossJoin.enabled": True}):
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
# Regression test for invalid join methods when on is None, SPARK-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
with self.sql_conf({"spark.sql.crossJoin.enabled": False}):
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_cache(self):
spark = self.spark
with self.tempView("tab1", "tab2"):
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegex(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegex(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegex(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def _to_pandas(self):
from datetime import datetime, date
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())\
.add("dt", DateType()).add("ts", TimestampType())
data = [
(1, "foo", True, 3.0, date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(2, "foo", True, 5.0, None, None),
(3, "bar", False, -1.0, date(2012, 3, 3), datetime(2012, 3, 3, 3, 3, 3)),
(4, "bar", False, 6.0, date(2100, 4, 4), datetime(2100, 4, 4, 4, 4, 4)),
]
df = self.spark.createDataFrame(data, schema)
return df.toPandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas(self):
import numpy as np
pdf = self._to_pandas()
types = pdf.dtypes
self.assertEqual(types[0], np.int32)
self.assertEqual(types[1], np.object)
self.assertEqual(types[2], np.bool)
self.assertEqual(types[3], np.float32)
self.assertEqual(types[4], np.object) # datetime.date
self.assertEqual(types[5], 'datetime64[ns]')
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_with_duplicated_column_names(self):
import numpy as np
sql = "select 1 v, 1 v"
for arrowEnabled in [False, True]:
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": arrowEnabled}):
df = self.spark.sql(sql)
pdf = df.toPandas()
types = pdf.dtypes
self.assertEqual(types.iloc[0], np.int32)
self.assertEqual(types.iloc[1], np.int32)
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_on_cross_join(self):
import numpy as np
sql = """
select t1.*, t2.* from (
select explode(sequence(1, 3)) v
) t1 left join (
select explode(sequence(1, 3)) v
) t2
"""
for arrowEnabled in [False, True]:
with self.sql_conf({"spark.sql.crossJoin.enabled": True,
"spark.sql.execution.arrow.pyspark.enabled": arrowEnabled}):
df = self.spark.sql(sql)
pdf = df.toPandas()
types = pdf.dtypes
self.assertEqual(types.iloc[0], np.int32)
self.assertEqual(types.iloc[1], np.int32)
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_to_pandas_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(ImportError, 'Pandas >= .* must be installed'):
self._to_pandas()
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_avoid_astype(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", IntegerType())
data = [(1, "foo", 16777220), (None, "bar", None)]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEqual(types[0], np.float64) # doesn't convert to np.int32 due to NaN value.
self.assertEqual(types[1], np.object)
self.assertEqual(types[2], np.float64)
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_from_empty_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on an empty dataframe has the correct dtypes
import numpy as np
sql = """
SELECT CAST(1 AS TINYINT) AS tinyint,
CAST(1 AS SMALLINT) AS smallint,
CAST(1 AS INT) AS int,
CAST(1 AS BIGINT) AS bigint,
CAST(0 AS FLOAT) AS float,
CAST(0 AS DOUBLE) AS double,
CAST(1 AS BOOLEAN) AS boolean,
CAST('foo' AS STRING) AS string,
CAST('2019-01-01' AS TIMESTAMP) AS timestamp
"""
dtypes_when_nonempty_df = self.spark.sql(sql).toPandas().dtypes
dtypes_when_empty_df = self.spark.sql(sql).filter("False").toPandas().dtypes
self.assertTrue(np.all(dtypes_when_empty_df == dtypes_when_nonempty_df))
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_from_null_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on a dataframe with only nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(NULL AS TINYINT) AS tinyint,
CAST(NULL AS SMALLINT) AS smallint,
CAST(NULL AS INT) AS int,
CAST(NULL AS BIGINT) AS bigint,
CAST(NULL AS FLOAT) AS float,
CAST(NULL AS DOUBLE) AS double,
CAST(NULL AS BOOLEAN) AS boolean,
CAST(NULL AS STRING) AS string,
CAST(NULL AS TIMESTAMP) AS timestamp
"""
pdf = self.spark.sql(sql).toPandas()
types = pdf.dtypes
self.assertEqual(types[0], np.float64)
self.assertEqual(types[1], np.float64)
self.assertEqual(types[2], np.float64)
self.assertEqual(types[3], np.float64)
self.assertEqual(types[4], np.float32)
self.assertEqual(types[5], np.float64)
self.assertEqual(types[6], np.object)
self.assertEqual(types[7], np.object)
self.assertTrue(np.can_cast(np.datetime64, types[8]))
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_to_pandas_from_mixed_dataframe(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
# SPARK-29188 test that toPandas() on a dataframe with some nulls has correct dtypes
import numpy as np
sql = """
SELECT CAST(col1 AS TINYINT) AS tinyint,
CAST(col2 AS SMALLINT) AS smallint,
CAST(col3 AS INT) AS int,
CAST(col4 AS BIGINT) AS bigint,
CAST(col5 AS FLOAT) AS float,
CAST(col6 AS DOUBLE) AS double,
CAST(col7 AS BOOLEAN) AS boolean,
CAST(col8 AS STRING) AS string,
timestamp_seconds(col9) AS timestamp
FROM VALUES (1, 1, 1, 1, 1, 1, 1, 1, 1),
(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
"""
pdf_with_some_nulls = self.spark.sql(sql).toPandas()
pdf_with_only_nulls = self.spark.sql(sql).filter('tinyint is null').toPandas()
self.assertTrue(np.all(pdf_with_only_nulls.dtypes == pdf_with_some_nulls.dtypes))
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_create_dataframe_from_pandas_with_timestamp(self):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]}, columns=["d", "ts"])
# test types are inferred correctly without specifying schema
df = self.spark.createDataFrame(pdf)
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
# test with schema will accept pdf as input
df = self.spark.createDataFrame(pdf, schema="d date, ts timestamp")
self.assertTrue(isinstance(df.schema['ts'].dataType, TimestampType))
self.assertTrue(isinstance(df.schema['d'].dataType, DateType))
@unittest.skipIf(have_pandas, "Required Pandas was found.")
def test_create_dataframe_required_pandas_not_found(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(
ImportError,
"(Pandas >= .* must be installed|No module named '?pandas'?)"):
import pandas as pd
from datetime import datetime
pdf = pd.DataFrame({"ts": [datetime(2017, 10, 31, 1, 1, 1)],
"d": [pd.Timestamp.now().date()]})
self.spark.createDataFrame(pdf)
# Regression test for SPARK-23360
@unittest.skipIf(not have_pandas, pandas_requirement_message) # type: ignore
def test_create_dataframe_from_pandas_with_dst(self):
import pandas as pd
from pandas.testing import assert_frame_equal
from datetime import datetime
pdf = pd.DataFrame({'time': [datetime(2015, 10, 31, 22, 30)]})
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
orig_env_tz = os.environ.get('TZ', None)
try:
tz = 'America/Los_Angeles'
os.environ['TZ'] = tz
time.tzset()
with self.sql_conf({'spark.sql.session.timeZone': tz}):
df = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df.toPandas())
finally:
del os.environ['TZ']
if orig_env_tz is not None:
os.environ['TZ'] = orig_env_tz
time.tzset()
def test_repr_behaviors(self):
import re
pattern = re.compile(r'^ *\|', re.MULTILINE)
df = self.spark.createDataFrame([(1, "1"), (22222, "22222")], ("key", "value"))
# test when eager evaluation is enabled and _repr_html_ will not be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """+-----+-----+
|| key|value|
|+-----+-----+
|| 1| 1|
||22222|22222|
|+-----+-----+
|"""
self.assertEqual(re.sub(pattern, '', expected1), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
||222| 222|
|+---+-----+
|"""
self.assertEqual(re.sub(pattern, '', expected2), df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|"""
self.assertEqual(re.sub(pattern, '', expected3), df.__repr__())
# test when eager evaluation is enabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": True}):
expected1 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>22222</td><td>22222</td></tr>
|</table>
|"""
self.assertEqual(re.sub(pattern, '', expected1), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
expected2 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|<tr><td>222</td><td>222</td></tr>
|</table>
|"""
self.assertEqual(re.sub(pattern, '', expected2), df._repr_html_())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
expected3 = """<table border='1'>
|<tr><th>key</th><th>value</th></tr>
|<tr><td>1</td><td>1</td></tr>
|</table>
|only showing top 1 row
|"""
self.assertEqual(re.sub(pattern, '', expected3), df._repr_html_())
# test when eager evaluation is disabled and _repr_html_ will be called
with self.sql_conf({"spark.sql.repl.eagerEval.enabled": False}):
expected = "DataFrame[key: bigint, value: string]"
self.assertEqual(None, df._repr_html_())
self.assertEqual(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.truncate": 3}):
self.assertEqual(None, df._repr_html_())
self.assertEqual(expected, df.__repr__())
with self.sql_conf({"spark.sql.repl.eagerEval.maxNumRows": 1}):
self.assertEqual(None, df._repr_html_())
self.assertEqual(expected, df.__repr__())
def test_to_local_iterator(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator()
self.assertEqual(expected, list(it))
# Test DataFrame with empty partition
df = self.spark.range(3, numPartitions=4)
it = df.toLocalIterator()
expected = df.collect()
self.assertEqual(expected, list(it))
def test_to_local_iterator_prefetch(self):
df = self.spark.range(8, numPartitions=4)
expected = df.collect()
it = df.toLocalIterator(prefetchPartitions=True)
self.assertEqual(expected, list(it))
def test_to_local_iterator_not_fully_consumed(self):
# SPARK-23961: toLocalIterator throws exception when not fully consumed
# Create a DataFrame large enough so that write to socket will eventually block
df = self.spark.range(1 << 20, numPartitions=2)
it = df.toLocalIterator()
self.assertEqual(df.take(1)[0], next(it))
with QuietTest(self.sc):
it = None # remove iterator from scope, socket is closed when cleaned up
# Make sure normal df operations still work
result = []
for i, row in enumerate(df.toLocalIterator()):
result.append(row)
if i == 7:
break
self.assertEqual(df.take(8), result)
def test_same_semantics_error(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(TypeError, "should be of DataFrame.*int"):
self.spark.range(10).sameSemantics(1)
def test_input_files(self):
tpath = tempfile.mkdtemp()
shutil.rmtree(tpath)
try:
self.spark.range(1, 100, 1, 10).write.parquet(tpath)
# read parquet file and get the input files list
input_files_list = self.spark.read.parquet(tpath).inputFiles()
# input files list should contain 10 entries
self.assertEqual(len(input_files_list), 10)
# all file paths in list must contain tpath
for file_path in input_files_list:
self.assertTrue(tpath in file_path)
finally:
shutil.rmtree(tpath)
def test_df_show(self):
# SPARK-35408: ensure better diagnostics if incorrect parameters are passed
# to DataFrame.show
df = self.spark.createDataFrame([('foo',)])
df.show(5)
df.show(5, True)
df.show(5, 1, True)
df.show(n=5, truncate='1', vertical=False)
df.show(n=5, truncate=1.5, vertical=False)
with self.assertRaisesRegex(TypeError, "Parameter 'n'"):
df.show(True)
with self.assertRaisesRegex(TypeError, "Parameter 'vertical'"):
df.show(vertical='foo')
with self.assertRaisesRegex(TypeError, "Parameter 'truncate=foo'"):
df.show(truncate='foo')
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
def test_to_pandas_on_spark(self):
import pandas as pd
from pandas.testing import assert_frame_equal
sdf = self.spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
psdf_from_sdf = sdf.to_pandas_on_spark()
psdf_from_sdf_with_index = sdf.to_pandas_on_spark(index_col="Col1")
pdf = pd.DataFrame({"Col1": ["a", "b", "c"], "Col2": [1, 2, 3]})
pdf_with_index = pdf.set_index("Col1")
assert_frame_equal(pdf, psdf_from_sdf.to_pandas())
assert_frame_equal(pdf_with_index, psdf_from_sdf_with_index.to_pandas())
class QueryExecutionListenerTests(unittest.TestCase, SQLTestUtils):
# These tests are separate because they use 'spark.sql.queryExecutionListeners', which is
# static and immutable. It can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"TestQueryExecutionListener.class")
cls.has_listener = bool(glob.glob(os.path.join(SPARK_HOME, filename_pattern)))
if cls.has_listener:
# Note that 'spark.sql.queryExecutionListeners' is a static immutable configuration.
cls.spark = SparkSession.builder \
.master("local[4]") \
.appName(cls.__name__) \
.config(
"spark.sql.queryExecutionListeners",
"org.apache.spark.sql.TestQueryExecutionListener") \
.getOrCreate()
def setUp(self):
if not self.has_listener:
raise self.skipTest(
"'org.apache.spark.sql.TestQueryExecutionListener' is not "
"available. Will skip the related tests.")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def tearDown(self):
self.spark._jvm.OnSuccessCall.clear()
def test_query_execution_listener_on_collect(self):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be called before 'collect'")
self.spark.sql("SELECT * FROM range(1)").collect()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'collect'")
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
def test_query_execution_listener_on_collect_with_arrow(self):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
self.assertFalse(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should not be "
"called before 'toPandas'")
self.spark.sql("SELECT * FROM range(1)").toPandas()
self.spark.sparkContext._jsc.sc().listenerBus().waitUntilEmpty(10000)
self.assertTrue(
self.spark._jvm.OnSuccessCall.isCalled(),
"The callback from the query execution listener should be called after 'toPandas'")
if __name__ == "__main__":
from pyspark.sql.tests.test_dataframe import * # noqa: F401
try:
import xmlrunner # type: ignore
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
mlperf/inference_results_v0.7 | closed/Neuchips/code/dlrm-99/Server/python/criteo.py | 1 | 14814 | """
implementation of criteo dataset
"""
# pylint: disable=unused-argument,missing-docstring
import logging
import os
import sys
import re
import time
import random
import numpy as np
import sklearn.metrics
import inspect
# pytorch
import torch
from torch.utils.data import Dataset, RandomSampler
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("criteo")
# add dlrm code path
try:
dlrm_dir_path = os.environ['DLRM_DIR']
sys.path.append(dlrm_dir_path)
except KeyError:
print("ERROR: Please set DLRM_DIR environment variable to the dlrm code location")
sys.exit(0)
#import dataset
import dlrm_data_pytorch as dp
import data_loader_terabyte
class Criteo(Dataset):
def __init__(self,
model,
data_path,
name,
pre_process,
use_cache,
count=None,
samples_to_aggregate_fix=None,
samples_to_aggregate_min=None,
samples_to_aggregate_max=None,
samples_to_aggregate_quantile_file=None,
samples_to_aggregate_trace_file=None,
test_num_workers=0,
max_ind_range=-1,
sub_sample_rate=0.0,
mlperf_bin_loader=False,
randomize="total",
memory_map=False):
super().__init__()
self.model = model
self.count = count
self.random_offsets = []
self.use_fixed_size = ((samples_to_aggregate_quantile_file is None) and
(samples_to_aggregate_min is None or samples_to_aggregate_max is None))
if self.use_fixed_size:
# fixed size queries
self.samples_to_aggregate = 1 if samples_to_aggregate_fix is None else samples_to_aggregate_fix
self.samples_to_aggregate_min = None
self.samples_to_aggregate_max = None
else:
# variable size queries
self.samples_to_aggregate = 1
self.samples_to_aggregate_min = samples_to_aggregate_min
self.samples_to_aggregate_max = samples_to_aggregate_max
self.samples_to_aggregate_quantile_file = samples_to_aggregate_quantile_file
if name == "kaggle":
raw_data_file = data_path + "/train.txt"
processed_data_file = data_path + "/kaggleAdDisplayChallenge_processed.npz"
elif name == "terabyte":
raw_data_file = data_path + "/day"
processed_data_file = data_path + "/terabyte_processed.npz"
else:
raise ValueError("only kaggle|terabyte dataset options are supported")
self.use_mlperf_bin_loader = mlperf_bin_loader and memory_map and name == "terabyte"
# debug prints
# print("dataset filenames", raw_data_file, processed_data_file)
self.test_data = dp.CriteoDataset(
dataset=name,
max_ind_range=max_ind_range,
sub_sample_rate=sub_sample_rate,
randomize=randomize,
split="test",
raw_path=raw_data_file,
pro_data=processed_data_file,
memory_map=memory_map
)
self.num_individual_samples = len(self.test_data)
if self.use_mlperf_bin_loader:
test_file = data_path + "/terabyte_processed_test.bin"
counts_file = raw_data_file + '_fea_count.npz'
data_loader_terabyte.numpy_to_binary(
input_files=[raw_data_file + '_23_reordered.npz'],
output_file_path=data_path + "/terabyte_processed_test.bin",
split="test")
self.test_data = data_loader_terabyte.CriteoBinDataset(
data_file=test_file,
counts_file=counts_file,
batch_size=self.samples_to_aggregate,
max_ind_range=max_ind_range
)
self.test_loader = torch.utils.data.DataLoader(
self.test_data,
batch_size=None,
batch_sampler=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
)
else:
self.test_loader = torch.utils.data.DataLoader(
self.test_data,
batch_size=self.samples_to_aggregate,
shuffle=False,
num_workers=test_num_workers,
collate_fn=dp.collate_wrapper_criteo,
pin_memory=False,
drop_last=False,
)
# WARNING: Note that the original dataset returns the number of samples, while the
# binary dataset returns the number of batches. Therefore, when using a mini-batch
# of size samples_to_aggregate as an item we need to adjust the original dataset item_count.
# On the other hand, data loader always returns number of batches.
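# Illustrative example (comment only, assumed numbers): with 10 individual
# samples and samples_to_aggregate = 4, the ceiling division used below gives
# (10 + 4 - 1) // 4 = 3 aggregated items, whereas in the binary-loader case
# len(self.test_data) is taken directly, since it already counts batches.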
if self.use_fixed_size:
# the offsets for fixed query size will be generated on-the-fly later on
print("Using fixed query size: " + str(self.samples_to_aggregate))
if self.use_mlperf_bin_loader:
self.num_aggregated_samples = len(self.test_data)
# self.num_aggregated_samples2 = len(self.test_loader)
else:
self.num_aggregated_samples = (self.num_individual_samples + self.samples_to_aggregate - 1) // self.samples_to_aggregate
# self.num_aggregated_samples2 = len(self.test_loader)
else:
# the offsets for variable query sizes will be pre-generated here
if self.samples_to_aggregate_quantile_file is None:
# generate number of samples in a query from a uniform(min,max) distribution
print("Using variable query size: uniform distribution (" + str(self.samples_to_aggregate_min) + "," + str(self.samples_to_aggregate_max) + ")")
done = False
qo = 0
while done == False:
self.random_offsets.append(int(qo))
qs = random.randint(self.samples_to_aggregate_min, self.samples_to_aggregate_max)
qo = min(qo + qs, self.num_individual_samples)
if qo >= self.num_individual_samples:
done = True
self.random_offsets.append(int(qo))
# compute min and max number of samples
nas_max = (self.num_individual_samples + self.samples_to_aggregate_min - 1) // self.samples_to_aggregate_min
nas_min = (self.num_individual_samples + self.samples_to_aggregate_max - 1) // self.samples_to_aggregate_max
else:
# generate number of samples in a query from a custom distribution,
# with quantile (inverse of its cdf) given in the file. Note that
# quantile is related to the concept of percentile in statistics.
#
# For instance, assume that we have the following distribution for query length
# length = [100, 200, 300, 400, 500, 600, 700] # x
# pdf = [0.1, 0.6, 0.1, 0.05, 0.05, 0.05, 0.05] # p(x)
# cdf = [0.1, 0.7, 0.8, 0.85, 0.9, 0.95, 1.0] # f(x) = prefix-sum of p(x)
# The inverse of its cdf with granularity of 0.05 can be written as
# quantile_p = [.05, .10, .15, .20, .25, .30, .35, .40, .45, .50, .55, .60, .65, .70, .75, .80, .85, .90, .95, 1.0] # p
# quantile_x = [100, 100, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 300, 300, 400, 500, 600, 700] # q(p) = x, such that f(x) >= p
# Notice that once we have quantile, we can apply inverse transform sampling method.
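# Minimal sketch of the inverse transform sampling used below (hypothetical
# numbers for illustration only; the real array is read from
# samples_to_aggregate_quantile_file):
#   quantile = np.array([100, 100, 200, 200, 200, 300, 400])
#   qs = quantile[np.random.randint(0, len(quantile))]
# Drawing a uniform index into the quantile array returns each length x with
# probability proportional to the number of quantile slots mapping to x, i.e. p(x).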
print("Using variable query size: custom distribution (file " + str(samples_to_aggregate_quantile_file) + ")")
with open(self.samples_to_aggregate_quantile_file, 'r') as f:
line = f.readline()
quantile = np.fromstring(line, dtype=int, sep=", ")
# debug prints
# print(quantile)
# print(len(quantile))
l = len(quantile)
done = False
qo = 0
while done == False:
self.random_offsets.append(int(qo))
pr = np.random.randint(low=0, high=l)
qs = quantile[pr]
qo = min(qo + qs, self.num_individual_samples)
if qo >= self.num_individual_samples:
done = True
self.random_offsets.append(int(qo))
# compute min and max number of samples
nas_max = (self.num_individual_samples + quantile[0] - 1) // quantile[0]
nas_min = (self.num_individual_samples + quantile[-1]- 1) // quantile[-1]
# reset num_aggregated_samples
self.num_aggregated_samples = len(self.random_offsets) - 1
# check num_aggregated_samples
if self.num_aggregated_samples < nas_min or nas_max < self.num_aggregated_samples:
raise ValueError("Sannity check failed")
# limit number of items to count if needed
if self.count is not None:
self.num_aggregated_samples = min(self.count, self.num_aggregated_samples)
# dump the trace of aggregated samples
if samples_to_aggregate_trace_file is not None:
with open(samples_to_aggregate_trace_file, 'w') as f:
for l in range(self.num_aggregated_samples):
if self.use_fixed_size:
s = l * self.samples_to_aggregate
e = min((l + 1) * self.samples_to_aggregate, self.num_individual_samples)
else:
s = self.random_offsets[l]
e = self.random_offsets[l+1]
f.write(str(s) + ", " + str(e) + ", " + str(e-s) + "\n")
def get_item_count(self):
# get number of items in the dataset
return self.num_aggregated_samples
''' lg compatibility routine '''
def unload_query_samples(self, sample_list):
self.items_in_memory = {}
''' lg compatibility routine '''
def load_query_samples(self, sample_list):
self.items_in_memory = {}
# WARNING: notice that while DataLoader is iterable-style, the Dataset
# can be iterable- or map-style, and Criteo[Bin]Dataset are the latter
# This means that we can not index into DataLoader, but can enumerate it,
# while we can index into the dataset itself.
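# For example (illustration only): self.test_data[0] is valid because the
# dataset is map-style, while self.test_loader[0] would raise a TypeError;
# the loader has to be consumed by iteration, e.g. enumerate(self.test_loader).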
for l in sample_list:
# approach 1: single sample as an item
'''
self.items_in_memory[l] = self.test_data[l]
'''
# approach 2: multiple samples as an item
if self.use_fixed_size:
s = l * self.samples_to_aggregate
e = min((l + 1) * self.samples_to_aggregate, self.num_individual_samples)
else:
s = self.random_offsets[l]
e = self.random_offsets[l+1]
ls = [self.test_data[i] for i in range(s, e)]
if self.use_mlperf_bin_loader:
# NOTE: in binary dataset the values are transformed
ls_t = list(zip(*ls))
X = torch.cat(ls_t[0])
lS_i = torch.cat(ls_t[2], dim=1)
T = torch.cat(ls_t[3])
d, s = self.model.collate_pre(X, lS_i)
exp = self.model.collate_post(d, s)
self.items_in_memory[l] = (T, exp)
else:
# NOTE: in the original dataset the values are not transformed;
# collate, besides stacking them, also transforms them
self.items_in_memory[l] = self.test_loader.collate_fn(ls)
self.last_loaded = time.time()
''' lg compatibility routine '''
def get_samples(self, id_list):
# build the list of tuples as needed by the batch conversion routine
# index i from id_list corresponds to a particular query_id
idx_offsets = [0]
ls = []
for i in id_list:
(T, _) = self.items_in_memory[i]
idx_offsets.append(idx_offsets[-1] + T.numel())
ls.append(self.items_in_memory[i])
# debug prints
# print(idx_offsets)
# approach 1: collate a mini-batch of single samples
'''
if self.use_mlperf_bin_loader:
# NOTE: in binary dataset the values are transformed
ls_t = list(zip(*ls))
X = torch.cat(ls_t[0])
(num_s, len_ls) = torch.cat(ls_t[1], dim=1).size()
lS_o = torch.stack([torch.tensor(range(len_ls)) for _ in range(num_s)])
lS_i = torch.cat(ls_t[2], dim=1)
T = torch.cat(ls_t[3])
else:
# NOTE: in original dataset the values are not transformed and collate besides stacking transforms them
X, lS_o, lS_i, T = self.test_loader.collate_fn(ls)
'''
# approach 2: collate a mini-batch of multiple samples
# NOTE: recall that the samples have already been transformed for both datasets
# (by earlier calls in load_query_samples), therefore we just need to stack them
ls_t = list(zip(*ls))
T = torch.cat(ls_t[0])
exp = b''.join(ls_t[1])
return (T, idx_offsets, exp)
# Pre processing
def pre_process_criteo_dlrm(x):
return x
# Post processing
class DlrmPostProcess:
def __init__(self):
self.good = 0
self.total = 0
self.roc_auc = 0
self.results = []
def __call__(self, results, expected=None, result_dict=None):
n = len(results)
res = np.asarray(results)
exp = np.array(expected)
processed_results = np.column_stack((res, exp))
self.good += (int((res.round() == exp).sum()))
self.total += n
return processed_results
def add_results(self, results):
self.results.append(results)
def start(self):
self.good = 0
self.total = 0
self.roc_auc = 0
self.results = []
def finalize(self, result_dict, ds=False, output_dir=None):
# AUC metric
self.results = np.concatenate(self.results, axis=0)
results, targets = list(zip(*self.results))
results = np.array(results)
targets = np.array(targets)
self.roc_auc = sklearn.metrics.roc_auc_score(targets, results)
result_dict["good"] = self.good
result_dict["total"] = self.total
result_dict["roc_auc"] = self.roc_auc
| apache-2.0 |
iurilarosa/thesis | codici/Archiviati/Notebook Completi/Hough64HWI.py | 1 | 11654 | import tensorflow as tf
import numpy
import scipy.io
from tensorflow.python.client import timeline
import time
sessione = tf.Session()
# LOAD DATA
tFft = 8192
inputFreq = 187
if inputFreq > 128:
tFft = 4096
print(tFft)
#tFft = 4096
tObs = 9 #months
tObs = tObs*30*24*60*60
nPunti = 2
cands = 100
percorsoDati = ("dati/dati9mesi%dHWI.mat"% inputFreq)
percorsoQuad = ("quadHWI%d.mat"% inputFreq)
percorsoPatch = ("quadHWI%dEcl.mat"% inputFreq)
percorsoOut = ("/home/protoss/Documenti/TESI/HWITest/mio%d.mat" % inputFreq)
#percorsoQuad = ("quad%dLIL.mat" % tFft)
#percorsoPatch = ("quad%dEclNew.mat" % tFft)
#load data file
quadrato = scipy.io.loadmat(percorsoQuad)['quad'].astype(numpy.float64)
patch = scipy.io.loadmat(percorsoPatch)['quadratoEclNew'].astype(numpy.float64)
print(patch)
struttura = scipy.io.loadmat(percorsoDati)['job_pack_0']
tempi = struttura['peaks'][0,0][0]#.astype(numpy.float32)
frequenze = struttura['peaks'][0,0][1]#.astype(numpy.float32)
pesi = (struttura['peaks'][0,0][4]+1)#.astype(numpy.float32)
#print(tempi.size,frequenze.size)
#nb: peaks holds 0-times
# 1-frequencies
# 2-weights
#various headers
securbelt = 4000
#securbelt = 4000*3
#frequencies
stepFreq = 1/tFft
enhancement = 10
stepFreqRaffinato = stepFreq/enhancement
#times
#epoch defined as the median of the times of the whole run #WARNING to be redefined with the data span actually used
epoca = (57722+57990)/2
#spindowns
spindownMin = -1e-9
spindownMax = 1e-10
if inputFreq > 128:
securbelt = 4000*2
spindownMin = -9e-9
spindownMax = -8e-9
stepSpindown = stepFreq/tObs
nstepSpindown = numpy.round((spindownMax-spindownMin)/stepSpindown).astype(numpy.int64)
spindowns = numpy.arange(0, nstepSpindown)
spindowns = numpy.multiply(spindowns,stepSpindown)
spindowns = numpy.add(spindowns, spindownMin)
#THE SPINDOWN STEP IS DEFINED THROUGH THE FREQUENCY BAND OF THE PEAKMAP (in hz)
#OVER THE OBSERVATION TIME (in sec):
#STEPFREQ/TOBS! FROM HERE WE CHOOSE WHICH MAXIMUM AND MINIMUM SD TO KEEP
# KEEP AROUND 1*-10^-9 AS THE EXTREME (MOST NEGATIVE) VALUE
# AND ABOUT 1*+10^-10 AT THE OTHER END
# TAKE ANY TARGET SOURCE INTO ACCOUNT WHEN CHOOSING THE SPINDOWN INTERVAL
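# Worked example (indicative values, assuming tFft = 4096 and tObs = 9 months):
# stepFreq = 1/4096 ~ 2.44e-4 Hz and tObs ~ 2.33e7 s, so
# stepSpindown = stepFreq/tObs ~ 1.05e-11 Hz/s; for the default range
# [-1e-9, 1e-10] this gives nstepSpindown ~ round(1.1e-9 / 1.05e-11) ~ 105 rows.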
#for doppler corr
veloc = struttura['basic_info'][0,0]['velpos'][0,0][0:3,:].astype(numpy.float64)
nTempi = struttura['basic_info'][0,0]['ntim'][0,0][0,0]
primoTempo = struttura['basic_info'][0,0]['tim0'][0,0][0,0]
indices = struttura['basic_info'][0,0]['index'][0,0][0]
#SPLIT DOPPLER CORR AND HOUGH TRANSFORM
# PREPARE FUNCTIONS
#TODO STUDY THE FEASIBILITY OF USING 32-BIT TYPES EVERYWHERE
# computes the doppler correction for each sky point
def doppcorr(i):
quadratoNP = quadrato[i]
indicesOpt = indices-1
inizi = indicesOpt[:-1]
fini = indicesOpt[1:]
velocitas = numpy.zeros((3,frequenze.size))
for i in numpy.arange(0,nTempi-1):
velocitas[:,inizi[i]:fini[i]+1] = veloc[:,i:i+1]
velPerPosIndex = numpy.dot(quadratoNP,velocitas)
divisoreIndex = 1+velPerPosIndex
freqCorr = frequenze / divisoreIndex
#print(freqCorr)
#derive the header for the frequencies
freqMin = numpy.amin(freqCorr)
#freqMax = tf.reduce_max(freqCorr)
freqIniz = freqMin- stepFreq/2 - stepFreqRaffinato
freqFinal = freqCorr-freqIniz
freqFinal = (freqFinal/stepFreqRaffinato)-round(enhancement/2+0.001)
#freqs = tf.concat([[freqIniz], freqCorr], 0)
return freqIniz, freqCorr, freqFinal#, nstepFrequenze
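# Note on doppcorr (descriptive comment, assumption about units): 'veloc' from
# the input .mat file is taken to be the detector velocity already expressed in
# units of c, so dividing the observed frequencies by (1 + v.n) for each sky
# direction n applies the non-relativistic Doppler correction to the source frame.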
def noncorr():
freqMin = numpy.amin(frequenze)
freqIniz = freqMin - stepFreq/2 -stepFreqRaffinato
freqNonCor = (frequenze -freqIniz)/stepFreqRaffinato-round(enhancement/2+0.001)
freqNonCor = tf.constant(freqNonCor, dtype = tf.float64)
return freqNonCor
# computes the hough map for each sky point (for each spindown)
def inDaHough(i, freqHM):
#def houghizza(stepIesimo):
#sdTimed = tf.multiply(spindownsTF[stepIesimo], tempiHM, name = "Tdotpert")
##sdTimed = tf.cast(sdTimed, dtype=tf.float32)
#appoggio = tf.round(freqHM-sdTimed+securbelt/2, name = "appoggioperindici")
#appoggio = tf.cast(appoggio, dtype=tf.int32)
#valori = tf.bincount(appoggio,weights=pesiTF)
#zeriDopo = tf.zeros([nColumns - tf.size(valori)], dtype=tf.float64)
#riga = tf.concat([valori,zeriDopo],0, name = "rigadihough")
#return riga
def houghizza(stepIesimo):
sdTimed = tf.multiply(spindownsTF[stepIesimo], tempiHM, name = "Tdotpert")
#sdTimed = tf.cast(sdTimed, dtype=tf.float32)
appoggio = tf.round(freqHM-sdTimed+securbelt/2, name = "appoggioperindici")
appoggio = tf.cast(appoggio, dtype=tf.int32)
valorisx = tf.unsorted_segment_sum(pesiHM, appoggio, nColumns)
valorisx = tf.cast(valorisx, dtype=tf.float32)
return valorisx
houghDiff = tf.map_fn(houghizza, tf.range(0, nRows), dtype=tf.float32, parallel_iterations=8)
def sliceInt():
#perform the final integration (old version without convolution)
semiLarghezza = tf.round(enhancement/2+0.001)
semiLarghezza = tf.cast(semiLarghezza, tf.int64)
houghInt = houghDiff[:,enhancement:nColumns]-houghDiff[:,0:nColumns - enhancement]
houghInt = tf.concat([houghDiff[:,0:enhancement],houghInt],1)
return houghInt
hough = sliceInt()
houghinal = tf.cumsum(hough, axis = 1)
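# (Descriptive note: the slice difference in sliceInt followed by the cumsum
# above acts as a moving sum of width 'enhancement' along the frequency axis,
# since the cumulative sum of diff[k] - diff[k-enhancement] telescopes to the
# sum of the last 'enhancement' differential bins, i.e. the integrated Hough count.)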
return houghinal
def manchurian_candidates(numCand, freqIniz, image, coord):
minDistance = enhancement*4
candidati = numpy.zeros((9,numCand*2))
primaFreq = freqIniz-(securbelt/2)*stepFreqRaffinato
freqIniziale = struttura['basic_info'][0,0]['frin'][0,0][0,0]
freqFinale = struttura['basic_info'][0,0]['frfi'][0,0][0,0]
#HERE: ANALOGOUS TO THE CUT GD2 FUNCTION
#%time indexInizialewh = numpy.where(freqniu>freqIniziale)[0][0]
#%time indexFinalewh = numpy.where(freqniu>freqFinale)[0][0]
start = time.time()
indexIniziale = ((freqIniziale-primaFreq)/stepFreqRaffinato).astype(numpy.int64)
indexFinale = ((freqFinale-primaFreq)/stepFreqRaffinato+1).astype(numpy.int64)
imageCand = image[:,indexIniziale:indexFinale].astype(numpy.int64)
#imageCand = numpy.flip(imageCand,0)
size = numpy.shape(imageCand)[1]
freqniu = numpy.arange(0,size)*stepFreqRaffinato+freqIniziale
maxPerColumn = numpy.amax(imageCand, axis = 0)
rigaMax = numpy.argmax(imageCand, axis = 0)
#######################
stepFrequenzaNiu = maxPerColumn.size/numCand
indiciFreq = numpy.arange(0,maxPerColumn.size,stepFrequenzaNiu)
indiciFreq = numpy.append(indiciFreq, maxPerColumn.size)
indiciFreq = numpy.round(indiciFreq).astype(numpy.int64)
print(indiciFreq)
def statistics(ndArray):
mediana = numpy.median(ndArray)
sigmana = numpy.median(numpy.absolute(ndArray-mediana))/0.6745
return mediana, sigmana
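# (The 0.6745 factor is the usual conversion from the median absolute deviation
# to a robust estimate of the Gaussian standard deviation: sigma ~ MAD/0.6745,
# since 0.6745 is the 0.75 quantile of the standard normal distribution.)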
stats = statistics(imageCand)
medianaTot = stats[0]
sigmanaTot = stats[1]
#print(medianaTot, sigmanaTot)
iniziali = numpy.concatenate(([indiciFreq[0]],indiciFreq[0:numCand-2],[indiciFreq[indiciFreq.size-3]]),0)
finali = numpy.concatenate(([indiciFreq[2]],indiciFreq[3:numCand+1],[indiciFreq[indiciFreq.size-1]]),0)
def statsPerCand(i):
stat = statistics(maxPerColumn[iniziali[i]:finali[i]])#[0]
return stat
statPerCand = numpy.array(list(map(statsPerCand, numpy.arange(numCand))))
#statPerCand = numpy.zeros((numCand,2))
#for i in numpy.arange(numCand):
#statPerCand[i] = statsPerCand(i)
#print(statPerCand)
medianaPerCand = statPerCand[:,0]
sigmanaPerCand = statPerCand[:,1]
percorsoRob = ("/home/protoss/Documenti/TESI/HWITest/mioRob%d.mat" % inputFreq)
scipy.io.savemat(percorsoRob,{"stat": statPerCand,
"maxs": maxPerColumn,
"imax": rigaMax,
"imageCand": imageCand})
filtro = numpy.where(medianaPerCand > 0)[0]
counter = 0
for i in filtro:
inizio = indiciFreq[i]
fine = indiciFreq[i+1]
porzioneMaxPerColumn = maxPerColumn[inizio:fine]
localMax = numpy.amax(porzioneMaxPerColumn)
localInd = numpy.argmax(porzioneMaxPerColumn)
if i == 1:
print(inizio, fine)
print(porzioneMaxPerColumn, porzioneMaxPerColumn.size)
print(localMax, localInd)
print(medianaPerCand[i])
print(medianaTot/2)
if localMax > medianaPerCand[i] and localMax > medianaTot/2:
counter = counter + 1
index = indiciFreq[i] + localInd
riga = rigaMax[index]
candidati[0,counter] = freqniu[index]
candidati[1,counter] = coord[0]
candidati[2,counter] = coord[1]
candidati[3,counter] = spindowns[riga]
candidati[4,counter] = localMax
candidati[5,counter] = (localMax-medianaPerCand[i])/sigmanaPerCand[i]
candidati[6,counter] = coord[2]/2
candidati[7,counter] = numpy.abs(coord[3]-coord[4])/4
candidati[8,counter] = 1
limite1 = numpy.amax([localInd-minDistance,1]).astype(numpy.int64)
limite2 = numpy.amin([localInd+minDistance,porzioneMaxPerColumn.size]).astype(numpy.int64)
porzioneMaxPerColumn[limite1:limite2] = 0
secondLocMax = numpy.amax(porzioneMaxPerColumn)
secondLocInd = numpy.argmax(porzioneMaxPerColumn)
if numpy.absolute(secondLocInd-localInd) > 2 * minDistance and secondLocMax > medianaPerCand[i]:
counter = counter + 1
index = indiciFreq[i] + secondLocInd
riga = rigaMax[index]
candidati[0,counter] = freqniu[index]
candidati[1,counter] = coord[0]
candidati[2,counter] = coord[1]
candidati[3,counter] = spindowns[riga]
candidati[4,counter] = secondLocMax
candidati[5,counter] = (secondLocMax-medianaPerCand[i])/sigmanaPerCand[i]
candidati[6,counter] = coord[2]/2
candidati[7,counter] = numpy.abs(coord[3]-coord[4])/4
candidati[8,counter] = 2
candidati[3,:]=numpy.round(candidati[3,:] / stepSpindown) * stepSpindown
return candidati
#from here on tensorflow is used
#define all the required constants
tempiTF = tf.constant(tempi,dtype=tf.float64)
pesiTF = tf.constant(pesi,dtype=tf.float64)
spindownsTF = tf.constant(spindowns, dtype=tf.float64)
tempiHM = tempiTF-epoca
tempiHM = ((tempiHM)*3600*24/stepFreqRaffinato)
tempiHM = tf.cast(tempiHM, tf.float64)
pesiHM = tf.reshape(pesiTF,(1,tf.size(pesiTF)))
pesiHM = pesiHM[0]
nRows = tf.constant(nstepSpindown, dtype=tf.int64)
#problem! the number of frequency steps changes with the doppler correction,
#because freq min and freq max can vary while the step is kept the same
#two possible ways to handle it: one is cutting everything to 96000 bins,
#the other is padding with some zeros before and after, finding via the doppler corr the absolute max
#and the absolute min frequency
#freqTF = noncorr()
#put a for loop here
start = time.time()
for punto in numpy.arange(0,nPunti-1):
freqInCorr,freqCorr, freqPerHough = doppcorr(punto)
nstepsFreq = numpy.ceil(securbelt+(numpy.amax(freqCorr)-numpy.amin(freqCorr) + stepFreq + 2*stepFreqRaffinato)/stepFreqRaffinato)
#print(nstepsFreq)
nColumns = tf.cast(nstepsFreq, dtype=tf.int32)
freqTF = tf.constant(freqPerHough, dtype = tf.float64)
houghmap = inDaHough(punto,freqTF)
hough = sessione.run(houghmap)
candidati = manchurian_candidates(cands, freqInCorr, hough, patch[punto])
nonzeri = numpy.nonzero(candidati[0])
finalCand = candidati[:,nonzeri]
stop = time.time()
print(stop-start)
#print(freqCorr)
scipy.io.savemat(percorsoOut,{"freqCorr": freqCorr,
"hough": hough,
"candidati": finalCand
})
from matplotlib import pyplot
pyplot.figure(figsize=(10, 8))
##posxTick = numpy.arange(5551, 89454, round((89454-5551)/10))
#posxTick = numpy.arange(2330, 210300, round((210300-2330)/50))
##labelxTick = numpy.arange(0,1.1,0.1)+inputFreq
#labelxTick = numpy.arange(0,5.1,0.1)+inputFreq
#pyplot.xticks(posxTick,labelxTick)
#posyTick = numpy.arange(11)*nstepSpindown/10
#labelyTick = numpy.arange(spindownMin, spindownMax, stepSpindown*nstepSpindown/10)
#pyplot.yticks(posyTick,labelyTick)
a = pyplot.imshow(hough, aspect = 400)
pyplot.colorbar(shrink = 1,aspect = 10)
pyplot.show() | gpl-3.0 |
google-research/group_testing | setup.py | 1 | 1480 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for installing group_testing as a pip module."""
import setuptools
VERSION = '1.0.0'
install_requires = [
'absl-py>=0.7.0',
'gin-config>=0.3.0',
'jax>=0.1.67',
'jaxlib>=0.1.47',
'pandas>=1.0.3',
'numpy>=1.16.0',
'scipy>=1.4.1',
'scikit-learn>=0.23.0'
]
description = ('Group Testing. This is the code that allows reproducing '
'the results in the scientific paper '
'https://arxiv.org/pdf/2004.12508.pdf.')
setuptools.setup(
name='group_testing',
version=VERSION,
packages=setuptools.find_packages(),
description=description,
long_description=description,
url='https://github.com/google-research/group_testing',
author='Google LLC',
author_email='[email protected]',
install_requires=install_requires,
license='Apache 2.0',
keywords='bayesian group testing monte carlo',
)
| apache-2.0 |
chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qtagg.py | 73 | 4972 | """
Render to qt from agg
"""
from __future__ import division
import os, sys
import matplotlib
from matplotlib import verbose
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
from backend_qt import qt, FigureManagerQT, FigureCanvasQT,\
show, draw_if_interactive, backend_version, \
NavigationToolbar2QT
DEBUG = False
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_qtagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQTAgg( thisFig )
return FigureManagerQTAgg( canvas, num )
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
def _get_canvas(self, fig):
return FigureCanvasQTAgg(fig)
class FigureManagerQTAgg(FigureManagerQT):
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
print "Classic toolbar is not yet supported"
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2QTAgg(canvas, parent)
else:
toolbar = None
return toolbar
class FigureCanvasQTAgg( FigureCanvasAgg, FigureCanvasQT ):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQtAgg: ', figure
FigureCanvasQT.__init__( self, figure )
FigureCanvasAgg.__init__( self, figure )
self.drawRect = False
self.rect = []
self.replot = True
self.pixmap = qt.QPixmap()
def resizeEvent( self, e ):
FigureCanvasQT.resizeEvent( self, e )
def drawRectangle( self, rect ):
self.rect = rect
self.drawRect = True
# False in repaint does not clear the image before repainting
self.repaint( False )
def paintEvent( self, e ):
"""
Draw to the Agg backend and then copy the image to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
FigureCanvasQT.paintEvent( self, e )
if DEBUG: print 'FigureCanvasQtAgg.paintEvent: ', self, \
self.get_width_height()
p = qt.QPainter( self )
# only replot data when needed
if type(self.replot) is bool: # might be a bbox for blitting
if self.replot:
FigureCanvasAgg.draw( self )
#stringBuffer = str( self.buffer_rgba(0,0) )
# matplotlib is in rgba byte order.
# qImage wants to put the bytes into argb format and
# is in a 4 byte unsigned int. little endian system is LSB first
# and expects the bytes in reverse order (bgra).
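# (Illustration: a pixel with bytes R,G,B,A in the agg buffer has to be
# stored as B,G,R,A in memory so it reads back as a little-endian 32-bit
# 0xAARRGGBB word, hence tostring_bgra() on little-endian systems.)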
if ( qt.QImage.systemByteOrder() == qt.QImage.LittleEndian ):
stringBuffer = self.renderer._renderer.tostring_bgra()
else:
stringBuffer = self.renderer._renderer.tostring_argb()
qImage = qt.QImage( stringBuffer, self.renderer.width,
self.renderer.height, 32, None, 0,
qt.QImage.IgnoreEndian )
self.pixmap.convertFromImage( qImage, qt.QPixmap.Color )
p.drawPixmap( qt.QPoint( 0, 0 ), self.pixmap )
# draw the zoom rectangle to the QPainter
if ( self.drawRect ):
p.setPen( qt.QPen( qt.Qt.black, 1, qt.Qt.DotLine ) )
p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] )
# we are blitting here
else:
bbox = self.replot
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
reg = self.copy_from_bbox(bbox)
stringBuffer = reg.to_string_argb()
qImage = qt.QImage(stringBuffer, w, h, 32, None, 0, qt.QImage.IgnoreEndian)
self.pixmap.convertFromImage(qImage, qt.QPixmap.Color)
p.drawPixmap(qt.QPoint(l, self.renderer.height-t), self.pixmap)
p.end()
self.replot = False
self.drawRect = False
def draw( self ):
"""
Draw the figure when xwindows is ready for the update
"""
if DEBUG: print "FigureCanvasQtAgg.draw", self
self.replot = True
FigureCanvasAgg.draw(self)
self.repaint(False)
def blit(self, bbox=None):
"""
Blit the region in bbox
"""
self.replot = bbox
self.repaint(False)
def print_figure(self, *args, **kwargs):
FigureCanvasAgg.print_figure(self, *args, **kwargs)
self.draw()
| agpl-3.0 |
tardis-sn/tardis | tardis/montecarlo/tests/test_base.py | 1 | 1085 | import os
import pandas as pd
import numpy as np
import pytest
from astropy import units as u
from numpy.testing import assert_almost_equal
###
# Save and Load
###
@pytest.fixture(scope="module", autouse=True)
def to_hdf_buffer(hdf_file_path, simulation_verysimple):
simulation_verysimple.runner.to_hdf(
hdf_file_path, name="runner", overwrite=True
)
runner_properties = [
"output_nu",
"output_energy",
"nu_bar_estimator",
"j_estimator",
"montecarlo_virtual_luminosity",
"last_interaction_in_nu",
"last_interaction_type",
"last_line_interaction_in_id",
"last_line_interaction_out_id",
"last_line_interaction_shell_id",
"packet_luminosity",
]
@pytest.mark.parametrize("attr", runner_properties)
def test_hdf_runner(hdf_file_path, simulation_verysimple, attr):
actual = getattr(simulation_verysimple.runner, attr)
if hasattr(actual, "cgs"):
actual = actual.cgs.value
path = os.path.join("runner", attr)
expected = pd.read_hdf(hdf_file_path, path)
assert_almost_equal(actual, expected.values)
| bsd-3-clause |
mxjl620/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
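A minimal sketch of the same bandwidth-selection-then-sample workflow on synthetic 1-D data; it assumes a recent scikit-learn where `GridSearchCV` lives in `sklearn.model_selection` rather than the older `sklearn.grid_search` module imported above:

import numpy as np
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV

rng = np.random.RandomState(0)
X = np.concatenate([rng.normal(-2, 0.5, 200), rng.normal(3, 1.0, 300)])[:, None]

# pick the bandwidth by cross-validated log-likelihood
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params, cv=5)
grid.fit(X)
kde = grid.best_estimator_
print("best bandwidth: {0}".format(kde.bandwidth))

new_samples = kde.sample(10, random_state=0)   # draw new points from the fitted density
log_density = kde.score_samples(new_samples)   # log p(x) under the model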
wzbozon/scikit-learn | sklearn/cluster/dbscan_.py | 92 | 12380 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None,
random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if random_state is not None:
warnings.warn("The parameter random_state is deprecated in 0.16 "
"and will be removed in version 0.18. "
"DBSCAN is deterministic except for rare border cases.",
category=DeprecationWarning)
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i; while true, this is useless information)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
| bsd-3-clause |
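A minimal usage sketch of the estimator documented above, on toy data (the values are hypothetical and chosen so that one point ends up labelled as noise):

import numpy as np
from sklearn.cluster import DBSCAN

X = np.array([[1.0, 1.1], [1.2, 0.9], [0.9, 1.0],    # first dense group
              [8.0, 8.1], [8.2, 7.9], [7.9, 8.0],    # second dense group
              [50.0, 50.0]])                          # isolated point -> noise

db = DBSCAN(eps=0.5, min_samples=2).fit(X)
print(db.labels_)                  # cluster label per sample, -1 marks noise
print(db.core_sample_indices_)     # indices of the core samples
print(db.components_)              # copies of the core samples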
russel1237/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
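A short usage sketch of the behaviours exercised by the tests above: selecting `n_components` by explained-variance fraction and reading back the fitted attributes (toy data, current scikit-learn API):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 5) * np.array([5.0, 2.0, 1.0, 0.1, 0.05])   # very unequal variances

pca = PCA(n_components=0.95)    # keep enough components to explain 95% of the variance
X_r = pca.fit_transform(X)
print(pca.n_components_)              # integer picked by the variance criterion
print(pca.explained_variance_ratio_)  # variance fraction of each kept component
print(pca.get_covariance().shape)     # regularised covariance estimate, (5, 5)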
lwjohnst86/studyGroup | lessons/python/matplotlib/hwk3.1.py | 12 | 2149 | # -*- coding: utf-8 -*-
from numpy import float32
from numpy import linspace
from numpy import polyfit
from numpy import polyval
import matplotlib.pyplot as plt
#Read in data from csv
f=open('data.csv','r')
line=f.readlines()
#Empty array for data
FN=[]
EFN=[]
#This loop goes through every line, strips the newline character and splits the data on ','. It then saves the values into the arrays
for l in line:
a=l.strip()
x,y=a.split(",")
FN.append(float32(x))
EFN.append(float32(y))
f.close()
#Generate a linear space that is used below to plot the fitted line
z=linspace(-1,4)
#Create grid and plot data
fig = plt.figure(figsize = (4,4), dpi = 600)
a = fig.add_subplot(1,1,1)
plt.plot(FN,EFN,'ks',markersize=3)
#Created a fitted line for the data
fit=polyfit(FN,EFN,1)
plt.plot(z,polyval(fit,z),label=fit,color='k')
#Reset font size
for t in a.yaxis.get_major_ticks():
t.label.set_fontsize(6)
for t in a.xaxis.get_major_ticks():
t.label.set_fontsize(6)
#Set the subplot sizing
fig.subplots_adjust(top=0.95, right =0.89, left=0.13,bottom=0.25)
#Set limits and labels
plt.xlim(-0.2,3.5)
plt.ylim(0,0.8)
plt.ylabel(r'Extrafloral Nectar (mg of sugar per extrafloral nectary)',fontsize=6,verticalalignment='center')
plt.xlabel(r'Floral Nectar (mg of sugar per flower)',fontsize=6,horizontalalignment='center')
#Save as pdf
fig.savefig('EFNvFN.pdf',dpi=600)
plt.show()
"""In ecology, animals and plants interact with one another in an ecosystem.
There are several types of interactions that may occur such as predation,
parasitism and mutualism. Mutualism is where the animals and plants both give
one another a survival benefit. So if a trait is not useful, why invest energy
into producing it?
Different interactions have generally been studied individually even though
they occur in a community. This plot shows the relationship between EFN and FN
production in T. ulmifolia. There is a positive correlation, which suggests that
plants that produce more of one also produce more of the other.
This is probably because of overall plant vigour. This was an initial figure
for a later experiment showing interactions."""
| apache-2.0 |
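A minimal sketch of the `polyfit`/`polyval` step used above to draw the fitted line, with made-up data in place of the CSV:

import numpy as np
from numpy import linspace, polyfit, polyval

x = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
y = 0.2 * x + 0.1 + np.random.RandomState(0).normal(0.0, 0.02, x.size)

fit = polyfit(x, y, 1)          # degree-1 least-squares fit: [slope, intercept]
z = linspace(-1, 4)             # same plotting range as in the script above
fitted = polyval(fit, z)        # evaluate the fitted line on the grid
print(fit)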
StephanEwen/incubator-flink | flink-python/pyflink/table/utils.py | 9 | 6042 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import ast
from pyflink.common.types import RowKind
from pyflink.java_gateway import get_gateway
from pyflink.table.types import DataType, LocalZonedTimestampType, Row, RowType, \
TimeType, DateType, ArrayType, MapType, TimestampType, FloatType
from pyflink.util.java_utils import to_jarray
import datetime
import pickle
def pandas_to_arrow(schema, timezone, field_types, series):
import pyarrow as pa
import pandas as pd
def create_array(s, t):
try:
return pa.Array.from_pandas(s, mask=s.isnull(), type=t)
except pa.ArrowException as e:
error_msg = "Exception thrown when converting pandas.Series (%s) to " \
"pyarrow.Array (%s)."
raise RuntimeError(error_msg % (s.dtype, t), e)
arrays = []
for i in range(len(schema)):
s = series[i]
field_type = field_types[i]
schema_type = schema.types[i]
if type(s) == pd.DataFrame:
array_names = [(create_array(s[s.columns[j]], field.type), field.name)
for j, field in enumerate(schema_type)]
struct_arrays, struct_names = zip(*array_names)
arrays.append(pa.StructArray.from_arrays(struct_arrays, struct_names))
else:
arrays.append(create_array(
tz_convert_to_internal(s, field_type, timezone), schema_type))
return pa.RecordBatch.from_arrays(arrays, schema)
def arrow_to_pandas(timezone, field_types, batches):
def arrow_column_to_pandas(arrow_column, t: DataType):
if type(t) == RowType:
import pandas as pd
series = [column.to_pandas(date_as_object=True).rename(field.name)
for column, field in zip(arrow_column.flatten(), arrow_column.type)]
return pd.concat(series, axis=1)
else:
return arrow_column.to_pandas(date_as_object=True)
import pyarrow as pa
table = pa.Table.from_batches(batches)
return [tz_convert_from_internal(arrow_column_to_pandas(c, t), t, timezone)
for c, t in zip(table.itercolumns(), field_types)]
def tz_convert_from_internal(s, t: DataType, local_tz):
"""
Converts the timestamp series from internal according to the specified local timezone.
Returns the same series if the series is not a timestamp series. Otherwise,
returns a converted series.
"""
if type(t) == LocalZonedTimestampType:
return s.dt.tz_localize(local_tz)
else:
return s
def tz_convert_to_internal(s, t: DataType, local_tz):
"""
Converts the timestamp series to internal according to the specified local timezone.
"""
if type(t) == LocalZonedTimestampType:
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
if is_datetime64_dtype(s.dtype):
return s.dt.tz_localize(None)
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(local_tz).dt.tz_localize(None)
return s
def to_expression_jarray(exprs):
"""
Convert python list of Expression to java array of Expression.
"""
gateway = get_gateway()
return to_jarray(gateway.jvm.Expression, [expr._j_expr for expr in exprs])
def pickled_bytes_to_python_converter(data, field_type: DataType):
if isinstance(field_type, RowType):
row_kind = RowKind(int.from_bytes(data[0], byteorder='big', signed=False))
data = zip(list(data[1:]), field_type.field_types())
fields = []
for d, d_type in data:
fields.append(pickled_bytes_to_python_converter(d, d_type))
result_row = Row(fields)
result_row.set_row_kind(row_kind)
return result_row
else:
data = pickle.loads(data)
if isinstance(field_type, TimeType):
seconds, microseconds = divmod(data, 10 ** 6)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, microseconds)
elif isinstance(field_type, DateType):
return field_type.from_sql_type(data)
elif isinstance(field_type, TimestampType):
return field_type.from_sql_type(int(data.timestamp() * 10**6))
elif isinstance(field_type, MapType):
key_type = field_type.key_type
value_type = field_type.value_type
zip_kv = zip(data[0], data[1])
return dict((pickled_bytes_to_python_converter(k, key_type),
pickled_bytes_to_python_converter(v, value_type))
for k, v in zip_kv)
elif isinstance(field_type, FloatType):
return field_type.from_sql_type(ast.literal_eval(data))
elif isinstance(field_type, ArrayType):
element_type = field_type.element_type
elements = []
for element_bytes in data:
elements.append(pickled_bytes_to_python_converter(element_bytes, element_type))
return elements
else:
return field_type.from_sql_type(data)
| apache-2.0 |
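A minimal sketch of the timezone normalisation performed by `tz_convert_to_internal` for a local-zoned timestamp column, using plain pandas so it runs without a Flink installation (the timezone value is an arbitrary example):

import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype

local_tz = "Europe/Berlin"                                   # arbitrary example zone
s = pd.Series(pd.date_range("2021-01-01", periods=3, freq="H", tz="UTC"))

# Same branching as tz_convert_to_internal for a LocalZonedTimestampType column:
if is_datetime64_dtype(s.dtype):
    internal = s.dt.tz_localize(None)
elif is_datetime64tz_dtype(s.dtype):
    internal = s.dt.tz_convert(local_tz).dt.tz_localize(None)
else:
    internal = s
print(internal)   # Berlin wall-clock times with the timezone information stripped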
anitzkin/opencog | opencog/python/spatiotemporal/temporal_events/animation.py | 34 | 4896 | from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation
__author__ = 'keyvan'
x_axis = xrange(13)
zeros_13 = zeros(13)
class Animation(object):
def __init__(self, event_a, event_b, event_c, plt=plt):
self.event_a = event_a
self.event_c = event_c
self.event_b_length_beginning = event_b.beginning - event_b.a
self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
self.event_b_length_total = event_b.b - event_b.ending
self.plt = plt
self.fig = plt.figure(1)
self.ax_a_b = self.fig.add_subplot(4, 1, 1)
self.ax_b_c = self.fig.add_subplot(4, 1, 2)
self.ax_a_c = self.fig.add_subplot(4, 1, 3)
self.ax_relations = self.fig.add_subplot(4, 1, 4)
self.ax_a_b.set_xlim(0, 13)
self.ax_a_b.set_ylim(0, 1)
self.ax_b_c.set_xlim(0, 13)
self.ax_b_c.set_ylim(0, 1)
self.ax_a_c.set_xlim(0, 13)
self.ax_a_c.set_ylim(0, 1)
self.rects_a_b = self.ax_a_b.bar(x_axis, zeros_13)
self.rects_b_c = self.ax_b_c.bar(x_axis, zeros_13)
self.rects_a_c = self.ax_a_c.bar(x_axis, zeros_13)
self.line_a = Line2D([], [])
self.line_b = Line2D([], [])
self.line_c = Line2D([], [])
self.ax_relations.add_line(self.line_a)
self.ax_relations.add_line(self.line_b)
self.ax_relations.add_line(self.line_c)
a = min(event_a.a, event_c.a) - self.event_b_length_total
b = max(event_a.b, event_c.b)
self.ax_relations.set_xlim(a, b + self.event_b_length_total)
self.ax_relations.set_ylim(0, 1.1)
# self.interval = TimeInterval(a, b, 150)
self.interval = TimeInterval(a, b, 2)
self.ax_a_b.xaxis.set_minor_formatter(self.ax_a_b.xaxis.get_major_formatter())
self.ax_a_b.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_b.xaxis.set_ticklabels('poDedOP')
self.ax_a_b.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_b_c.xaxis.set_minor_formatter(self.ax_b_c.xaxis.get_major_formatter())
self.ax_b_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_b_c.xaxis.set_ticklabels('poDedOP')
self.ax_b_c.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_a_c.xaxis.set_minor_formatter(self.ax_a_c.xaxis.get_major_formatter())
self.ax_a_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_c.xaxis.set_ticklabels('poDedOP')
self.ax_a_c.xaxis.set_ticklabels('mFsSfM', minor=True)
def init(self):
artists = []
self.line_a.set_data(self.event_a, self.event_a.membership_function)
self.line_b.set_data([], [])
self.line_c.set_data(self.event_c, self.event_c.membership_function)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
for rect, h in zip(self.rects_a_b, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_a_c, (self.event_a * self.event_c).to_list()):
rect.set_height(h)
artists.append(rect)
return artists
def animate(self, t):
interval = self.interval
B = TemporalEventTrapezium(interval[t], interval[t] + self.event_b_length_total,
interval[t] + self.event_b_length_beginning,
interval[t] + self.event_b_length_middle)
plt.figure()
B.plot().show()
a_b = (self.event_a * B).to_list()
b_c = (B * self.event_c).to_list()
self.line_b.set_data(B, B.membership_function)
artists = []
for rect, h in zip(self.rects_a_b, a_b):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, b_c):
rect.set_height(h)
artists.append(rect)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
return artists
def show(self):
fr = len(self.interval) - 1
anim = animation.FuncAnimation(self.fig, self.animate, init_func=self.init,
frames=fr, interval=fr, blit=True)
self.plt.show()
if __name__ == '__main__':
anim = Animation(TemporalEventTrapezium(4, 8, 5, 7),
TemporalEventTrapezium(0, 10, 6, 9),
TemporalEventTrapezium(0.5, 11, 1, 3))
# anim.show()
| agpl-3.0 |
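A minimal self-contained sketch of the `FuncAnimation` init/animate/blit pattern the class above builds on (a single sine curve stands in for the temporal-event membership functions):

import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from matplotlib.lines import Line2D

fig, ax = plt.subplots()
ax.set_xlim(0, 2 * np.pi)
ax.set_ylim(-1.1, 1.1)
line = Line2D([], [])
ax.add_line(line)
x = np.linspace(0, 2 * np.pi, 200)

def init():
    line.set_data([], [])
    return [line]

def animate(t):
    line.set_data(x, np.sin(x + 0.1 * t))
    return [line]

anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=60, interval=30, blit=True)
plt.show()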
dicortazar/ceres | cereslib/dfutils/format.py | 1 | 3667 | #!/usr/bin/python
# Copyright (C) 2016 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Daniel Izquierdo Cortazar <[email protected]>
#
import pandas
import scipy
import datetime
class Format(object):
""" Library that allows to format dataframes to be later enriched
This class is the first step in the enrichment process of data.
Although this can be used alone for other purposes, its main
goal consists of providing well formated [missing fields,
string dates, removal of not needed fields] for the following
steps of the enrichment process.
This data format and cleaning process is done due to
inconsistencies and missing fields that may appear when reading
information.
"""
def fill_missing_fields(self, data, columns):
""" This method fills with 0's missing fields
:param data: original Pandas dataframe
:param columns: list of columns to be filled in the DataFrame
:type data: pandas.DataFrame
:type columns: list of strings
:returns: Pandas dataframe with missing fields filled with 0's
:rtype: pandas.DataFrame
"""
for column in columns:
if column not in data.columns:
data[column] = scipy.zeros(len(data))
return data
def update_field_names(self, data, matching):
""" This method updates the names of the fields according to matching
:param data: original Pandas dataframe
:param matching: dictionary of matchings between old and new values
:type data: pandas.DataFrame
:type matching: dictionary
:returns: Pandas dataframe with updated names
:rtype: pandas.DataFrame
"""
for key in matching.keys():
if key in data.columns:
                data = data.rename(columns={key: matching[key]})
return data
def format_dates(self, data, columns):
""" This method translates columns values into datetime objects
:param data: original Pandas dataframe
:param columns: list of columns to cast the date to a datetime object
:type data: pandas.DataFrame
:type columns: list of strings
:returns: Pandas dataframe with updated 'columns' with datetime objects
:rtype: pandas.DataFrame
"""
for column in columns:
if column in data.columns:
data[column] = pandas.to_datetime(data[column])
return data
def remove_columns(self, data, columns):
""" This method removes columns in data
:param data: original Pandas dataframe
:param columns: list of columns to remove
:type data: pandas.DataFrame
:type columns: list of strings
:returns: Pandas dataframe with removed columns
:rtype: pandas.DataFrame
"""
for column in columns:
if column in data.columns:
data = data.drop(column, axis=1)
return data
| lgpl-3.0 |
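A short usage sketch of the `Format` helper defined above, assuming the `cereslib` package is importable; the column names are made up:

import pandas as pd
from cereslib.dfutils.format import Format   # assumes the cereslib package is installed

data = pd.DataFrame({"author": ["alice", "bob"],
                     "date": ["2016-01-02", "2016-03-04"]})

fmt = Format()
data = fmt.fill_missing_fields(data, ["message"])   # adds a zero-filled 'message' column
data = fmt.format_dates(data, ["date"])             # casts the string dates to datetime
data = fmt.remove_columns(data, ["author"])         # drops the unwanted column
print(data.dtypes)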
JT5D/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 22 | 9838 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, misOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
# Let's check the PLSSVD doesn't return all possible component but just
# the specificied number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
| bsd-3-clause |
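A minimal usage sketch of the cross-decomposition estimators exercised above: fit a two-component `PLSRegression` on toy data and read back scores, loadings and predictions:

import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 4))
Y = X[:, :2].dot(np.array([[1.0, 0.5], [0.5, 1.0]])) + 0.1 * rng.normal(size=(100, 2))

pls = PLSRegression(n_components=2)
pls.fit(X, Y)
T = pls.transform(X)        # latent x scores
Y_pred = pls.predict(X)     # prediction back in the original Y space
print(pls.x_weights_.shape, pls.y_loadings_.shape)   # (4, 2) and (2, 2)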
rosswhitfield/mantid | Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/FindPeakAutomaticTest.py | 3 | 26334 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
import numpy as np
from mantid.simpleapi import CreateEmptyTableWorkspace, CreateWorkspace, DeleteWorkspace, FindPeaksAutomatic
from mantid.api import mtd
from unittest import mock
import plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic as _FindPeaksAutomatic
class FindPeaksAutomaticTest(unittest.TestCase):
data_ws = None
peak_guess_table = None
peak_table_header = [
'centre', 'error centre', 'height', 'error height', 'sigma', 'error sigma', 'area',
'error area'
]
alg_instance = None
x_values = None
y_values = None
def setUp(self):
# Creating two peaks on an exponential background with gaussian noise
self.x_values = np.linspace(0, 100, 1001)
self.centre = [25, 75]
self.height = [35, 20]
self.width = [10, 5]
self.y_values = self.gaussian(self.x_values, self.centre[0], self.height[0], self.width[0])
self.y_values += self.gaussian(self.x_values, self.centre[1], self.height[1], self.width[1])
self.background = 10 * np.ones(len(self.x_values))
self.y_values += self.background
# Generating a table with a guess of the position of the centre of the peaks
peak_table = CreateEmptyTableWorkspace()
peak_table.addColumn(type='float', name='Approximated Centre')
peak_table.addRow([self.centre[0] + 2])
peak_table.addRow([self.centre[1] - 3])
self.peakids = [
np.argwhere(self.x_values == self.centre[0])[0, 0],
np.argwhere(self.x_values == self.centre[1])[0, 0]
]
# Generating a workspace with the data and a flat background
self.raw_ws = CreateWorkspace(DataX=self.x_values,
DataY=self.y_values,
OutputWorkspace='raw_ws')
self.data_ws = CreateWorkspace(DataX=np.concatenate((self.x_values, self.x_values)),
DataY=np.concatenate((self.y_values, self.background)),
DataE=np.sqrt(
np.concatenate((self.y_values, self.background))),
NSpec=2,
OutputWorkspace='data_ws')
self.peak_guess_table = peak_table
self.alg_instance = _FindPeaksAutomatic.FindPeaksAutomatic()
def tearDown(self):
self.delete_if_present('data_ws')
self.delete_if_present('peak_guess_table')
self.delete_if_present('peak_table')
self.delete_if_present('refit_peak_table')
self.delete_if_present('fit_cost')
self.delete_if_present('fit_result_NormalisedCovarianceMatrix')
self.delete_if_present('fit_result_Parameters')
self.delete_if_present('fit_result_Workspace')
self.delete_if_present('fit_table')
self.delete_if_present('data_table')
self.delete_if_present('refit_data_table')
self.delete_if_present('tmp_table')
self.alg_instance = None
self.peak_guess_table = None
self.data_ws = None
@staticmethod
def gaussian(xvals, centre, height, sigma):
exponent = (xvals - centre) / (np.sqrt(2) * sigma)
return height * np.exp(-exponent * exponent)
@staticmethod
def delete_if_present(workspace):
if workspace in mtd:
DeleteWorkspace(workspace)
def assertTableEqual(self, expected, actual):
self.assertEqual(expected.columnCount(), actual.columnCount())
self.assertEqual(expected.rowCount(), actual.rowCount())
for i in range(expected.rowCount()):
self.assertEqual(expected.row(i), actual.row(i))
def assertPeakFound(self, peak_params, centre, height, sigma, tolerance=0.01):
if not np.isclose(peak_params['centre'], centre, rtol=tolerance):
raise Exception('Expected {}, got {}. Difference greater than tolerance {}'
.format(centre, peak_params['centre'], tolerance))
if not np.isclose(peak_params['height'], height, rtol=tolerance):
raise Exception('Expected {}, got {}. Difference greater than tolerance {}'
.format(height, peak_params['height'], tolerance))
if not np.isclose(peak_params['sigma'], sigma, rtol=tolerance):
raise Exception('Expected {}, got {}. Difference greater than tolerance {}'
.format(sigma, peak_params['sigma'], tolerance))
def test_algorithm_with_no_input_workspace_raises_exception(self):
with self.assertRaises(RuntimeError):
FindPeaksAutomatic()
def test_algorithm_with_negative_acceptance_threshold_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws,
AcceptanceThreshold=-0.1,
PlotPeaks=False)
def test_algorithm_with_invalid_spectrum_number(self):
#tests that a float spectrum number throws an error
with self.assertRaises(TypeError):
FindPeaksAutomatic(InputWorkspace=self.data_ws,
PlotPeaks=False,
SpectrumNumber = 3.4)
#tests that a negative integer throws an error
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws,
PlotPeaks=False,
SpectrumNumber = -1 )
def test_algorithm_with_negative_smooth_window_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws, SmoothWindow=-5, PlotPeaks=False)
def test_algorithm_with_negative_num_bad_peaks_to_consider_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws, BadPeaksToConsider=-3, PlotPeaks=False)
def test_algorithm_with_negative_estimate_of_peak_sigma_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws, EstimatePeakSigma=-3, PlotPeaks=False)
def test_algorithm_with_negative_min_peak_sigma_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws, MinPeakSigma=-0.1, PlotPeaks=False)
def test_algorithm_with_negative_max_peak_sigma_throws(self):
with self.assertRaises(ValueError):
FindPeaksAutomatic(InputWorkspace=self.data_ws, MaxPeakSigma=-0.1, PlotPeaks=False)
def test_algorithm_creates_all_output_workspaces(self):
ws_name = self.raw_ws.getName()
FindPeaksAutomatic(self.raw_ws)
self.assertIn('{}_with_errors'.format(ws_name), mtd)
self.assertIn('{}_{}'.format(self.raw_ws.getName(), 'properties'), mtd)
self.assertIn('{}_{}'.format(self.raw_ws.getName(), 'refit_properties'), mtd)
def test_algorithm_works_on_specified_spectrum(self):
x_values = np.array([np.linspace(0, 100, 1001), np.linspace(0, 100, 1001)], dtype=float)
centre = np.array([[25, 75], [10, 60]], dtype=float)
height = np.array([[35, 20], [40, 50]], dtype=float)
width = np.array([[10, 5], [8, 6]], dtype=float)
y_values = np.array(
[self.gaussian(x_values[0], centre[0, 0], height[0, 0], width[0, 0]),
self.gaussian(x_values[1], centre[1, 0], height[1, 0], width[1, 0])])
y_values += np.array(
[self.gaussian(x_values[0], centre[0, 1], height[0, 1], width[0, 1]),
self.gaussian(x_values[1], centre[1, 1], height[1, 1], width[1, 1])])
background = 10 * np.ones(x_values.shape)
y_values += background
raw_ws = CreateWorkspace(DataX=x_values,
DataY=y_values,
OutputWorkspace='raw_ws',NSpec = 2)
FindPeaksAutomatic(
InputWorkspace=raw_ws,
SpectrumNumber = 2,
SmoothWindow=500,
EstimatePeakSigma=6,
MinPeakSigma=3,
MaxPeakSigma=15,
)
peak_table = mtd['{}_{}'.format(raw_ws.getName(), 'properties')]
print(peak_table.row(1))
self.assertPeakFound(peak_table.row(0), 10, 40, 8)
self.assertPeakFound(peak_table.row(1), 60, 50, 6)
def test_algorithm_throws_RuntimeError_when_called_with_invalid_spectrum_number(self):
x_values = np.array([np.linspace(0, 100, 1001), np.linspace(0, 100, 1001)], dtype=float)
centre = np.array([[25, 75], [10, 60]], dtype=float)
height = np.array([[35, 20], [40, 50]], dtype=float)
width = np.array([[10, 5], [8, 6]], dtype=float)
y_values = np.array(
[self.gaussian(x_values[0], centre[0, 0], height[0, 0], width[0, 0]),
self.gaussian(x_values[1], centre[1, 0], height[1, 0], width[1, 0])])
y_values += np.array(
[self.gaussian(x_values[0], centre[0, 1], height[0, 1], width[0, 1]),
self.gaussian(x_values[1], centre[1, 1], height[1, 1], width[1, 1])])
background = 10 * np.ones(x_values.shape)
y_values += background
raw_ws = CreateWorkspace(DataX=x_values,
DataY=y_values,
OutputWorkspace='raw_ws',NSpec = 2)
with self.assertRaises(RuntimeError):
FindPeaksAutomatic(
InputWorkspace=raw_ws,
SpectrumNumber = 3,
SmoothWindow=500,
EstimatePeakSigma=6,
MinPeakSigma=3,
MaxPeakSigma=15,)
def test_algorithm_does_not_create_temporary_workspaces(self):
FindPeaksAutomatic(self.raw_ws)
self.assertNotIn('ret', mtd)
self.assertNotIn('raw_data_ws', mtd)
self.assertNotIn('flat_ws', mtd)
self.assertNotIn('fit_result_NormalisedCovarianceMatrix', mtd)
self.assertNotIn('fit_result_Parameters', mtd)
self.assertNotIn('fit_result_Workspace', mtd)
self.assertNotIn('fit_cost', mtd)
def test_output_tables_are_correctly_formatted(self):
FindPeaksAutomatic(self.raw_ws, FitToBaseline=True)
peak_table = mtd['{}_{}'.format(self.raw_ws.getName(), 'properties')]
refit_peak_table = mtd['{}_{}'.format(self.raw_ws.getName(), 'refit_properties')]
self.assertEqual(self.peak_table_header, peak_table.getColumnNames())
self.assertEqual(self.peak_table_header, refit_peak_table.getColumnNames())
self.assertEqual(2, peak_table.rowCount())
self.assertEqual(0, refit_peak_table.rowCount())
def test_single_erosion_returns_correct_result(self):
yvals = np.array([-2, 3, 1, 0, 4])
self.assertEqual(-2, self.alg_instance._single_erosion(yvals, 2, 2))
def test_single_erosion_checks_extremes_of_list_correctly(self):
yvals = np.array([-5, -3, 0, 1, -2, 2, 9])
self.assertEqual(-2, self.alg_instance._single_erosion(yvals, 3, 1))
self.assertEqual(-3, self.alg_instance._single_erosion(yvals, 3, 2))
def test_single_erosion_with_zero_window_does_nothing(self):
yvals = np.array([-5, -3, 0, 1, -2, 2, 9])
self.assertEqual(0, self.alg_instance._single_erosion(yvals, 2, 0))
def test_single_dilation_returns_correct_result(self):
yvals = np.array([-2, 3, 1, 0, 4])
self.assertEqual(4, self.alg_instance._single_dilation(yvals, 2, 2))
def test_single_dilation_checks_extremes_of_list_correctly(self):
yvals = np.array([-5, 3, 0, -7, 2, -2, 9])
self.assertEqual(2, self.alg_instance._single_dilation(yvals, 3, 1))
self.assertEqual(3, self.alg_instance._single_dilation(yvals, 3, 2))
def test_single_dilation_with_zero_window_does_nothing(self):
yvals = np.array([-5, -3, 0, 1, -2, 2, 9])
self.assertEqual(0, self.alg_instance._single_dilation(yvals, 2, 0))
def test_erosion_with_zero_window_is_an_invariant(self):
np.testing.assert_equal(self.y_values, self.alg_instance.erosion(self.y_values, 0))
def test_erosion_calls_single_erosion_the_correct_number_of_times(self, ):
with mock.patch(
'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic._single_erosion'
) as mock_single_erosion:
times = len(self.y_values)
win_size = 2
call_list = []
for i in range(times):
call_list.append(mock.call(self.y_values, i, win_size))
self.alg_instance.erosion(self.y_values, win_size)
self.assertEqual(times, mock_single_erosion.call_count)
mock_single_erosion.assert_has_calls(call_list, any_order=True)
def test_dilation_with_zero_window_is_an_invariant(self):
np.testing.assert_equal(self.y_values, self.alg_instance.dilation(self.y_values, 0))
def test_dilation_calls_single_erosion_the_correct_number_of_times(self):
with mock.patch(
'plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic._single_dilation'
) as mock_single_dilation:
times = len(self.y_values)
win_size = 2
call_list = []
for i in range(times):
call_list.append(mock.call(self.y_values, i, win_size))
self.alg_instance.dilation(self.y_values, win_size)
self.assertEqual(times, mock_single_dilation.call_count)
mock_single_dilation.assert_has_calls(call_list, any_order=True)
@mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.erosion')
@mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.dilation'
)
def test_opening_calls_correct_functions_in_correct_order(self, mock_dilation, mock_erosion):
win_size = 3
self.alg_instance.opening(self.y_values, win_size)
self.assertEqual(mock_erosion.call_count, 1)
self.assertEqual(mock_dilation.call_count, 1)
erosion_ret = self.alg_instance.erosion(self.y_values, win_size)
mock_erosion.assert_called_with(self.y_values, win_size)
mock_dilation.assert_called_with(erosion_ret, win_size)
@mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.opening')
@mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.dilation')
@mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FindPeaksAutomatic.erosion')
def test_average_calls_right_functions_in_right_order(self, mock_erosion, mock_dilation,
mock_opening):
win_size = 3
self.alg_instance.average(self.y_values, win_size)
self.assertEqual(mock_erosion.call_count, 1)
self.assertEqual(mock_dilation.call_count, 1)
self.assertEqual(mock_opening.call_count, 2)
op_ret = self.alg_instance.opening(self.y_values, win_size)
mock_opening.assert_called_with(self.y_values, win_size)
mock_dilation.assert_called_with(op_ret, win_size)
mock_erosion.assert_called_with(op_ret, win_size)
def test_generate_peak_guess_table_correctly_formats_table(self):
peakids = [2, 4, 10, 34]
peak_guess_table = self.alg_instance.generate_peak_guess_table(self.x_values, peakids)
self.assertEqual(peak_guess_table.getColumnNames(), ['centre'])
def test_generate_peak_guess_table_with_no_peaks_generates_empty_table(self):
peak_guess_table = self.alg_instance.generate_peak_guess_table(self.x_values, [])
self.assertEqual(peak_guess_table.rowCount(), 0)
def test_generate_peak_guess_table_adds_correct_values_of_peak_centre(self):
peakids = [2, 23, 19, 34, 25, 149, 234]
peak_guess_table = self.alg_instance.generate_peak_guess_table(self.x_values, peakids)
for i, pid in enumerate(sorted(peakids)):
self.assertAlmostEqual(peak_guess_table.row(i)['centre'], self.x_values[pid], 5)
def test_find_good_peaks_calls_fit_gaussian_peaks_twice_if_no_peaks_given(self):
with mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FitGaussianPeaks'
) as mock_fit:
tmp_table = CreateEmptyTableWorkspace()
tmp_table.addColumn(type='float', name='chi2')
tmp_table.addColumn(type='float', name='poisson')
tmp_table.addRow([10, 20])
mock_fit.return_value = (mock.MagicMock(), mock.MagicMock(), tmp_table)
self.alg_instance.min_sigma = 1
self.alg_instance.max_sigma = 10
self.alg_instance.find_good_peaks(self.x_values, [], 0.1, 5, False, self.data_ws, 5)
self.assertEqual(2, mock_fit.call_count)
def _table_side_effect(self, idx):
raise ValueError('Index = %d' % idx)
def test_find_good_peaks_selects_correct_column_for_error(self):
with mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.FitGaussianPeaks'
) as mock_fit:
mock_table = mock.Mock()
mock_table.column.side_effect = self._table_side_effect
mock_fit.return_value = None, None, mock_table
# chi2 cost
with self.assertRaises(ValueError) as chi2:
self.alg_instance.find_good_peaks(self.x_values, [], 0.1, 5, False, self.data_ws, 5)
# poisson cost
with self.assertRaises(ValueError) as poisson:
self.alg_instance.find_good_peaks(self.x_values, [], 0.1, 5, True, self.data_ws, 5)
self.assertIn('Index = 0', chi2.exception.args)
self.assertNotIn('Index = 1', chi2.exception.args)
self.assertNotIn('Index = 0', poisson.exception.args)
self.assertIn('Index = 1', poisson.exception.args)
def test_find_good_peaks_returns_correct_peaks(self):
self.alg_instance._min_sigma = 1
self.alg_instance._max_sigma = 10
actual_peaks, peak_table, refit_peak_table = self.alg_instance.find_good_peaks(
self.x_values, self.peakids, 0, 5, False, self.data_ws, 5)
peak1 = peak_table.row(0)
peak2 = peak_table.row(1)
self.assertEquals(self.peakids, actual_peaks)
self.assertEqual(0, refit_peak_table.rowCount())
self.assertEqual(refit_peak_table.getColumnNames(), peak_table.getColumnNames())
self.assertPeakFound(peak1, self.centre[0], self.height[0]+10, self.width[0], 0.05)
self.assertPeakFound(peak2, self.centre[1], self.height[1]+10, self.width[1], 0.05)
def test_find_peaks_is_called_if_scipy_version_higher_1_1_0(self):
mock_scipy = mock.MagicMock()
mock_scipy.__version__ = '1.1.0'
mock_scipy.signal.find_peaks.return_value = (self.peakids, {
'prominences': self.peakids
})
with mock.patch.dict('sys.modules', scipy=mock_scipy):
self.alg_instance.process(self.x_values,
self.y_values,
raw_error=np.sqrt(self.y_values),
acceptance=0,
average_window=50,
bad_peak_to_consider=2,
use_poisson=False,
peak_width_estimate=5,
fit_to_baseline=False,
prog_reporter=mock.Mock())
self.assertEqual(2, mock_scipy.signal.find_peaks.call_count)
self.assertEqual(0, mock_scipy.signal.find_peaks_cwt.call_count)
def test_find_peaks_cwt_is_called_if_scipy_version_lower_1_1_0(self):
mock_scipy = mock.MagicMock()
mock_scipy.__version__ = '1.0.0'
mock_scipy.signal.find_peaks.return_value = (self.peakids, {
'prominences': self.peakids
})
with mock.patch.dict('sys.modules', scipy=mock_scipy):
self.alg_instance.process(self.x_values,
self.y_values,
raw_error=np.sqrt(self.y_values),
acceptance=0,
average_window=50,
bad_peak_to_consider=2,
use_poisson=False,
peak_width_estimate=5,
fit_to_baseline=False,
prog_reporter=mock.Mock())
self.assertEqual(0, mock_scipy.signal.find_peaks.call_count)
self.assertEqual(1, mock_scipy.signal.find_peaks_cwt.call_count)
def test_process_calls_find_good_peaks(self):
with mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.CreateWorkspace'
) as mock_create_ws:
mock_create_ws.return_value = self.data_ws
self.alg_instance.find_good_peaks = mock.Mock()
self.alg_instance.process(self.x_values,
self.y_values,
raw_error=np.sqrt(self.y_values),
acceptance=0,
average_window=50,
bad_peak_to_consider=2,
use_poisson=False,
peak_width_estimate=5,
fit_to_baseline=False,
prog_reporter=mock.Mock())
base = self.alg_instance.average(self.y_values, 50)
base += self.alg_instance.average(self.y_values - base, 50)
flat = self.y_values - base
self.assertEqual(1, self.alg_instance.find_good_peaks.call_count)
            self.alg_instance.find_good_peaks.assert_called_with(self.x_values,
flat,
acceptance=0,
bad_peak_to_consider=2,
use_poisson=False,
fit_ws=self.data_ws,
peak_width_estimate=5)
def test_process_returns_the_return_value_of_find_good_peaks(self):
with mock.patch('plugins.algorithms.WorkflowAlgorithms.FindPeaksAutomatic.CreateWorkspace'
) as mock_create_ws:
mock_create_ws.return_value = self.data_ws
win_size = 500
actual_return = self.alg_instance.process(self.x_values,
self.y_values,
raw_error=np.sqrt(self.y_values),
acceptance=0,
average_window=win_size,
bad_peak_to_consider=2,
use_poisson=False,
peak_width_estimate=5,
fit_to_baseline=False,
prog_reporter=mock.Mock())
import copy
actual_return = copy.deepcopy(actual_return)
base = self.alg_instance.average(self.y_values, win_size)
base += self.alg_instance.average(self.y_values - base, win_size)
expected_return = self.alg_instance.find_good_peaks(self.x_values,
self.peakids,
acceptance=0,
bad_peak_to_consider=2,
use_poisson=False,
fit_ws=self.data_ws,
peak_width_estimate=5), base
self.assertEqual(expected_return[0][0], actual_return[0][0])
self.assertTableEqual(expected_return[0][1], actual_return[0][1])
np.testing.assert_almost_equal(expected_return[1], actual_return[1])
def _assert_matplotlib_not_present(self, *args):
import sys
self.assertNotIn('matplotlib.pyplot', sys.modules)
# If matplotlib.pyplot is imported other tests fail on windows and ubuntu
def test_matplotlib_pyplot_is_not_imported(self):
self.alg_instance.dilation = mock.Mock(side_effect=self._assert_matplotlib_not_present)
self.alg_instance.opening(self.y_values, 0)
def test_that_algorithm_finds_peaks_correctly(self):
FindPeaksAutomatic(
InputWorkspace=self.raw_ws,
SmoothWindow=500,
EstimatePeakSigma=5,
MinPeakSigma=3,
MaxPeakSigma=15,
)
peak_table = mtd['{}_{}'.format(self.raw_ws.getName(), 'properties')]
refit_peak_table = mtd['{}_{}'.format(self.raw_ws.getName(), 'refit_properties')]
self.assertEqual(2, peak_table.rowCount())
self.assertEqual(0, refit_peak_table.rowCount())
self.assertPeakFound(peak_table.row(0), self.centre[0], self.height[0], self.width[0], 0.05)
self.assertPeakFound(peak_table.row(1), self.centre[1], self.height[1], self.width[1], 0.05)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
ilayn/scipy | scipy/special/_precompute/struve_convergence.py | 12 | 3456 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
import numpy as np
import matplotlib.pyplot as plt # type: ignore[import]
import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
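# Quick illustration of the metric (hypothetical values, not used by the script):
#   err_metric(np.array([1.0, np.inf]), np.array([1.0, np.inf]))  -> array([0., 0.])
# A point (v, z) is painted in a series' colour below when
# log10(err_metric(series, mpmath)) < -12, i.e. the series agrees with the mpmath
# reference to about 1e-12 relative error; the masking line above makes matching
# infinities count as exact agreement rather than NaN.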
def do_plot(is_h=True):
from scipy.special._ufuncs import (_struve_power_series,
_struve_asymp_large_z,
_struve_bessel_series)
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
simpeg/simpeg | examples/06-dc/plot_dipoledipole_3Dinversion_twospheres.py | 1 | 6653 | """
3D DC inversion of Dipole Dipole array
======================================
This is an example for 3D DC Inversion. The model consists of 2 spheres,
one conductive, the other one resistive compared to the background.
We restrict the inversion to the Core Mesh through the use of an Active Cells
mapping that we combine with an exponential mapping to invert
in log conductivity space. Here the mapping, :math:`\\mathcal{M}`,
indicates a transformation of our model to a different space:
.. math::
    \\sigma = \\mathcal{M}(\\mathbf{m})
The following example will show you how a user can implement a 3D DC inversion.
"""
from SimPEG import (
Mesh, Maps, Utils,
DataMisfit, Regularization, Optimization,
InvProblem, Directives, Inversion
)
from SimPEG.EM.Static import DC, Utils as DCUtils
import numpy as np
import matplotlib.pyplot as plt
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
np.random.seed(12345)
# 3D Mesh
#########
# Cell sizes
csx, csy, csz = 1., 1., 0.5
# Number of core cells in each direction
ncx, ncy, ncz = 41, 31, 21
# Number of padding cells to add in each direction
npad = 7
# Vectors of cell lengths in each direction with padding
hx = [(csx, npad, -1.5), (csx, ncx), (csx, npad, 1.5)]
hy = [(csy, npad, -1.5), (csy, ncy), (csy, npad, 1.5)]
hz = [(csz, npad, -1.5), (csz, ncz)]
# Create mesh and center it
mesh = Mesh.TensorMesh([hx, hy, hz], x0="CCN")
# 2-spheres Model Creation
##########################
# Spheres parameters
x0, y0, z0, r0 = -6., 0., -3.5, 3.
x1, y1, z1, r1 = 6., 0., -3.5, 3.
# ln conductivity
ln_sigback = -5.
ln_sigc = -3.
ln_sigr = -6.
# Define model
# Background
mtrue = ln_sigback * np.ones(mesh.nC)
# Conductive sphere
csph = (np.sqrt((mesh.gridCC[:, 0] - x0)**2. + (mesh.gridCC[:, 1] - y0)**2. +
(mesh.gridCC[:, 2] - z0)**2.)) < r0
mtrue[csph] = ln_sigc * np.ones_like(mtrue[csph])
# Resistive Sphere
rsph = (np.sqrt((mesh.gridCC[:, 0] - x1)**2. + (mesh.gridCC[:, 1] - y1)**2. +
(mesh.gridCC[:, 2] - z1)**2.)) < r1
mtrue[rsph] = ln_sigr * np.ones_like(mtrue[rsph])
# Extract Core Mesh
xmin, xmax = -20., 20.
ymin, ymax = -15., 15.
zmin, zmax = -10., 0.
xyzlim = np.r_[[[xmin, xmax], [ymin, ymax], [zmin, zmax]]]
actind, meshCore = Utils.meshutils.ExtractCoreMesh(xyzlim, mesh)
# Function to plot cylinder border
def getCylinderPoints(xc, zc, r):
xLocOrig1 = np.arange(-r, r + r / 10., r / 10.)
xLocOrig2 = np.arange(r, -r - r / 10., -r / 10.)
# Top half of cylinder
zLoc1 = np.sqrt(-xLocOrig1**2. + r**2.) + zc
# Bottom half of cylinder
zLoc2 = -np.sqrt(-xLocOrig2**2. + r**2.) + zc
# Shift from x = 0 to xc
xLoc1 = xLocOrig1 + xc * np.ones_like(xLocOrig1)
xLoc2 = xLocOrig2 + xc * np.ones_like(xLocOrig2)
topHalf = np.vstack([xLoc1, zLoc1]).T
topHalf = topHalf[0:-1, :]
bottomHalf = np.vstack([xLoc2, zLoc2]).T
bottomHalf = bottomHalf[0:-1, :]
cylinderPoints = np.vstack([topHalf, bottomHalf])
cylinderPoints = np.vstack([cylinderPoints, topHalf[0, :]])
return cylinderPoints
# Setup a synthetic Dipole-Dipole Survey
# Line 1
xmin, xmax = -15., 15.
ymin, ymax = 0., 0.
zmin, zmax = 0, 0
endl = np.array([[xmin, ymin, zmin], [xmax, ymax, zmax]])
survey1 = DCUtils.gen_DCIPsurvey(endl, "dipole-dipole", dim=mesh.dim,
a=3, b=3, n=8)
# Line 2
xmin, xmax = -15., 15.
ymin, ymax = 5., 5.
zmin, zmax = 0, 0
endl = np.array([[xmin, ymin, zmin], [xmax, ymax, zmax]])
survey2 = DCUtils.gen_DCIPsurvey(endl, "dipole-dipole", dim=mesh.dim,
a=3, b=3, n=8)
# Line 3
xmin, xmax = -15., 15.
ymin, ymax = -5., -5.
zmin, zmax = 0, 0
endl = np.array([[xmin, ymin, zmin], [xmax, ymax, zmax]])
survey3 = DCUtils.gen_DCIPsurvey(endl, "dipole-dipole", dim=mesh.dim,
a=3, b=3, n=8)
# Concatenate lines
survey = DC.Survey(survey1.srcList + survey2.srcList + survey3.srcList)
# Setup Problem with exponential mapping and Active cells only in the core mesh
expmap = Maps.ExpMap(mesh)
mapactive = Maps.InjectActiveCells(mesh=mesh, indActive=actind,
valInactive=-5.)
mapping = expmap * mapactive
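# Sketch of what the composed mapping evaluates to (using the names defined above):
#   sigma = mapping * m == expmap * (mapactive * m)
# i.e. the inversion model m holds log-conductivities on the active (core) cells only;
# InjectActiveCells pads it to the full mesh (inactive cells fixed at -5.) and ExpMap
# exponentiates, yielding the conductivity sigma used by the DC problem.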
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
problem.Solver = Solver
survey.dpred(mtrue[actind])
survey.makeSyntheticData(mtrue[actind], std=0.05, force=True)
# Tikhonov Inversion
####################
# Initial Model
m0 = np.median(ln_sigback) * np.ones(mapping.nP)
# Data Misfit
dmis = DataMisfit.l2_DataMisfit(survey)
# Regularization
regT = Regularization.Simple(mesh, indActive=actind, alpha_s=1e-6,
alpha_x=1., alpha_y=1., alpha_z=1.)
# Optimization Scheme
opt = Optimization.InexactGaussNewton(maxIter=10)
# Form the problem
opt.remember('xc')
invProb = InvProblem.BaseInvProblem(dmis, regT, opt)
# Directives for Inversions
beta = Directives.BetaEstimate_ByEig(beta0_ratio=1e+1)
Target = Directives.TargetMisfit()
betaSched = Directives.BetaSchedule(coolingFactor=5., coolingRate=2)
inv = Inversion.BaseInversion(invProb, directiveList=[beta, Target,
betaSched])
# Run Inversion
minv = inv.run(m0)
# Final Plot
############
fig, ax = plt.subplots(2, 2, figsize=(12, 6))
ax = Utils.mkvc(ax)
cyl0v = getCylinderPoints(x0, z0, r0)
cyl1v = getCylinderPoints(x1, z1, r1)
cyl0h = getCylinderPoints(x0, y0, r0)
cyl1h = getCylinderPoints(x1, y1, r1)
clim = [(mtrue[actind]).min(), (mtrue[actind]).max()]
dat = meshCore.plotSlice(((mtrue[actind])), ax=ax[0], normal='Y', clim=clim,
ind=int(ncy / 2))
ax[0].set_title('Ground Truth, Vertical')
ax[0].set_aspect('equal')
meshCore.plotSlice((minv), ax=ax[1], normal='Y', clim=clim, ind=int(ncy / 2))
ax[1].set_aspect('equal')
ax[1].set_title('Inverted Model, Vertical')
meshCore.plotSlice(((mtrue[actind])), ax=ax[2], normal='Z', clim=clim,
ind=int(ncz / 2))
ax[2].set_title('Ground Truth, Horizontal')
ax[2].set_aspect('equal')
meshCore.plotSlice((minv), ax=ax[3], normal='Z', clim=clim, ind=int(ncz / 2))
ax[3].set_title('Inverted Model, Horizontal')
ax[3].set_aspect('equal')
for i in range(2):
ax[i].plot(cyl0v[:, 0], cyl0v[:, 1], 'k--')
ax[i].plot(cyl1v[:, 0], cyl1v[:, 1], 'k--')
for i in range(2, 4):
ax[i].plot(cyl1h[:, 0], cyl1h[:, 1], 'k--')
ax[i].plot(cyl0h[:, 0], cyl0h[:, 1], 'k--')
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
cb = plt.colorbar(dat[0], ax=cbar_ax)
cb.set_label('ln conductivity')
cbar_ax.axis('off')
plt.show()
| mit |
ai-se/Transfer-Learning | src/RQ1_3.py | 1 | 4195 | """
Compare XTREE with other threshold-based learners.
"""
from __future__ import print_function, division
import os
import sys
# Update path
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
sys.path.append(root)
from planners.XTREE import xtree
from planners.alves import alves
from utils.plot_util import plot_bar
from planners.shatnawi import shatnawi
from planners.oliveira import oliveira
from data.get_data import get_all_projects
from utils.file_util import list2dataframe
from utils.stats_utils.auec import compute_auec
from utils.rq_utils import measure_overlap, reshape
def research_question_1_3(decrease=True, verbose=True, plot_results=True):
"""
RQ1: How effective is XTREE?
RQ3: How does XTREE compare with BELLTREE? (The XTREE part of this RQ is answered here)
Parameters
----------
decrease: Bool
Compute AUPEC for defects reduced.
verbose: Bool
Display results on the console
plot_results: Bool
Save barcharts of overlap vs. defects increased/decreased
"""
data = get_all_projects()
if verbose:
print("Data \tXTREE\tAlves\tShatw\tOlive")
for proj, paths in data.iteritems():
i = 0
for train, test, validation in zip(paths.data[:-2], paths.data[1:-1], paths.data[2:]):
i += 1
"Convert to pandas type dataframe"
train = list2dataframe(train)
test = list2dataframe(test)
validation = list2dataframe(validation)
"Recommend changes with XTREE"
patched_xtree = xtree(train[train.columns[1:]], test)
patched_alves = alves(train[train.columns[1:]], test)
patched_shatw = shatnawi(train[train.columns[1:]], test)
patched_olive = oliveira(train[train.columns[1:]], test)
"Compute overlap with developers changes"
res_xtree = measure_overlap(test, patched_xtree, validation)
res_alves = measure_overlap(test, patched_alves, validation)
res_shatw = measure_overlap(test, patched_shatw, validation)
res_olive = measure_overlap(test, patched_olive, validation)
"AUPEC of defects decreased/increased"
res_dec, res_inc = reshape(res_xtree, res_alves, res_shatw, res_olive)
"Plot the results"
if plot_results:
plot_bar(res_inc, res_dec, save_path=os.path.join(
root, "results", "RQ1", proj), title="{} v{}".format(proj, i), y_lbl="Defects",
postfix="")
"Max/Min to normalize AUPEC"
y_max = max(res_dec.max(axis=0).values)
y_min = max(res_dec.min(axis=0).values)
if decrease:
"Decrease AUC"
xtree_dec_auc = compute_auec(res_dec[["Overlap", "XTREE"]], y_max, y_min)
alves_dec_auc = compute_auec(res_dec[["Overlap", "Alves"]], y_max, y_min)
shatw_dec_auc = compute_auec(res_dec[["Overlap", "Shatnawi"]], y_max, y_min)
olive_dec_auc = compute_auec(res_dec[["Overlap", "Oliveira"]], y_max, y_min)
if verbose:
print("{}-{}\t{}\t{}\t{}\t{}".format(proj[:3], i, xtree_dec_auc, alves_dec_auc, shatw_dec_auc, olive_dec_auc))
else:
"Increase AUC"
xtree_inc_auc = compute_auec(res_inc[["Overlap", "XTREE"]], y_max, y_min)
alves_inc_auc = compute_auec(res_inc[["Overlap", "Alves"]], y_max, y_min)
shatw_inc_auc = compute_auec(res_inc[["Overlap", "Shatnawi"]], y_max, y_min)
olive_inc_auc = compute_auec(res_inc[["Overlap", "Oliveira"]], y_max, y_min)
if verbose:
print("{}-{}\t{}\t{}\t{}\t{}".format(proj[:3], i, xtree_inc_auc, alves_inc_auc, shatw_inc_auc, olive_inc_auc))
if __name__ == "__main__":
print("AUPEC: Defects Reduced\n{}".format(22*"-"))
research_question_1_3(decrease=True, verbose=True, plot_results=False)
print("\n"+40*"="+"\nAUPEC: Defects Increased\n"+24*"-")
research_question_1_3(decrease=False, verbose=True, plot_results=False)
| unlicense |
lssfau/walberla | python/waLBerla_docs/ipython/ipython-tutorials/material/matplotlib_setup.py | 1 | 3753 | import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from tempfile import NamedTemporaryFile
import base64
from IPython import get_ipython
ipython = get_ipython()
ipython.magic("matplotlib inline") # Show plots as images embedded in iPython notebook
def setMplFigureSize():
matplotlib.rcParams['figure.figsize'] = (15.0, 12.0)
VIDEO_TAG = """<video controls width="80%">
<source src="data:video/x-m4v;base64,{0}" type="video/mp4">
Your browser does not support the video tag.
</video>"""
def __anim_to_html(anim, fps):
if not hasattr(anim, '_encoded_video'):
with NamedTemporaryFile(suffix='.mp4') as f:
anim.save(f.name, fps=fps, extra_args=['-vcodec', 'libx264', '-pix_fmt',
'yuv420p', '-profile:v', 'baseline', '-level', '3.0'])
video = open(f.name, "rb").read()
anim._encoded_video = base64.b64encode(video).decode('ascii')
return VIDEO_TAG.format(anim._encoded_video)
def makeImshowAnimation(grid, gridUpdateFunction, frames=90, **kwargs):
from functools import partial
fig = plt.figure()
im = plt.imshow(grid, interpolation='none')
def updatefig(*args, **kwargs):
image = kwargs['image']
image = gridUpdateFunction(image)
im.set_array(image)
return im,
return animation.FuncAnimation(fig, partial(updatefig, image=grid), frames=frames)
# ------- Version 1: Embed the animation as HTML5 video --------- ----------------------------------
def displayAsHtmlVideo(anim, fps=30, show=True, **kwargs):
try:
plt.close(anim._fig)
res = __anim_to_html(anim, fps)
if show:
return HTML(res)
else:
return HTML("")
except KeyboardInterrupt:
pass
# ------- Version 2: Animation is shown in extra matplotlib window ----------------------------------
def displayInExtraWindow(animation, *args, **kwargs):
fig = plt.gcf()
try:
fig.canvas.manager.window.raise_()
except Exception:
pass
plt.show()
# ------- Version 3: Animation is shown in images that are updated directly in website --------------
def displayAsHtmlImage(animation, show=True, iterations=10000, *args, **kwargs):
from IPython import display
try:
if show:
fig = plt.gcf()
if show:
animation._init_draw()
for i in range(iterations):
if show:
display.display(fig)
animation._step()
if show:
display.clear_output(wait=True)
except KeyboardInterrupt:
display.clear_output(wait=False)
# Dispatcher
animation_display_mode = 'imageupdate'
display_animation_func = None
def disp(*args, **kwargs):
if not display_animation_func:
raise ("Call set_display_mode first")
return display_animation_func(*args, **kwargs)
def set_display_mode(mode):
from IPython import get_ipython
ipython = get_ipython()
global animation_display_mode
global display_animation_func
animation_display_mode = mode
if animation_display_mode == 'video':
ipython.magic("matplotlib inline")
display_animation_func = displayAsHtmlVideo
elif animation_display_mode == 'window':
ipython.magic("matplotlib qt")
display_animation_func = displayInExtraWindow
elif animation_display_mode == 'imageupdate':
ipython.magic("matplotlib inline")
display_animation_func = displayAsHtmlImage
else:
raise Exception("Unknown mode. Available modes 'imageupdate', 'video' and 'window' ")
set_display_mode('imageupdate')
setMplFigureSize()
| gpl-3.0 |
cactusbin/nyt | matplotlib/lib/matplotlib/tri/tripcolor.py | 4 | 5640 | from __future__ import print_function
from matplotlib.collections import PolyCollection, TriMesh
from matplotlib.colors import Normalize
from matplotlib.tri.triangulation import Triangulation
import numpy as np
def tripcolor(ax, *args, **kwargs):
"""
Create a pseudocolor plot of an unstructured triangular grid.
The triangulation can be specified in one of two ways; either::
tripcolor(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
tripcolor(x, y, ...)
tripcolor(x, y, triangles, ...)
tripcolor(x, y, triangles=triangles, ...)
tripcolor(x, y, mask=mask, ...)
tripcolor(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The next argument must be *C*, the array of color values, either
one per point in the triangulation if color values are defined at
points, or one per triangle in the triangulation if color values
are defined at triangles. If there are the same number of points
and triangles in the triangulation it is assumed that color
values are defined at points; to force the use of color values at
triangles use the kwarg *facecolors*=C instead of just *C*.
*shading* may be 'flat' (the default) or 'gouraud'. If *shading*
is 'flat' and C values are defined at points, the color values
used for each triangle are from the mean C of the triangle's
three points. If *shading* is 'gouraud' then color values must be
defined at points. *shading* of 'faceted' is deprecated;
please use *edgecolors* instead.
The remaining kwargs are the same as for
:meth:`~matplotlib.axes.Axes.pcolor`.
**Example:**
.. plot:: mpl_examples/pylab_examples/tripcolor_demo.py
"""
if not ax._hold: ax.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
facecolors = kwargs.pop('facecolors', None)
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
# C is the colors array, defined at either points or faces (i.e. triangles).
# If facecolors is None, C are defined at points.
# If facecolors is not None, C are defined at faces.
if facecolors is not None:
C = facecolors
else:
C = np.asarray(args[0])
# If there are a different number of points and triangles in the
# triangulation, can omit facecolors kwarg as it is obvious from
# length of C whether it refers to points or faces.
# Do not do this for gouraud shading.
if (facecolors is None and len(C) == len(tri.triangles) and
len(C) != len(tri.x) and shading != 'gouraud'):
facecolors = C
# Check length of C is OK.
if ( (facecolors is None and len(C) != len(tri.x)) or
(facecolors is not None and len(C) != len(tri.triangles)) ):
raise ValueError('Length of color values array must be the same '
'as either the number of triangulation points '
'or triangles')
# Handling of linewidths, shading, edgecolors and antialiased as
# in Axes.pcolor
linewidths = (0.25,)
if 'linewidth' in kwargs:
kwargs['linewidths'] = kwargs.pop('linewidth')
kwargs.setdefault('linewidths', linewidths)
if shading == 'faceted': # Deprecated.
edgecolors = 'k'
else:
edgecolors = 'none'
if 'edgecolor' in kwargs:
kwargs['edgecolors'] = kwargs.pop('edgecolor')
ec = kwargs.setdefault('edgecolors', edgecolors)
if 'antialiased' in kwargs:
kwargs['antialiaseds'] = kwargs.pop('antialiased')
if 'antialiaseds' not in kwargs and ec.lower() == "none":
kwargs['antialiaseds'] = False
if shading == 'gouraud':
if facecolors is not None:
raise ValueError('Gouraud shading does not support the use '
'of facecolors kwarg')
if len(C) != len(tri.x):
raise ValueError('For gouraud shading, the length of color '
'values array must be the same as the '
'number of triangulation points')
collection = TriMesh(tri, **kwargs)
else:
# Vertices of triangles.
maskedTris = tri.get_masked_triangles()
verts = np.concatenate((tri.x[maskedTris][...,np.newaxis],
tri.y[maskedTris][...,np.newaxis]), axis=2)
# Color values.
if facecolors is None:
# One color per triangle, the mean of the 3 vertex color values.
C = C[maskedTris].mean(axis=1)
elif tri.mask is not None:
# Remove color values of masked triangles.
C = C.compress(1-tri.mask)
collection = PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, Normalize))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
ax.grid(False)
minx = tri.x.min()
maxx = tri.x.max()
miny = tri.y.min()
maxy = tri.y.max()
corners = (minx, miny), (maxx, maxy)
ax.update_datalim( corners)
ax.autoscale_view()
ax.add_collection(collection)
return collection
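# Minimal usage sketch (illustrative; this function is normally reached through
# Axes.tripcolor, and the point positions/colour values below are made up):
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x, y = np.random.rand(2, 50)
#   C = np.hypot(x - 0.5, y - 0.5)            # one colour value per point
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   ax.tripcolor(x, y, C, shading='gouraud')  # delegates to tripcolor(ax, x, y, C, ...)
#   plt.show()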
| unlicense |
schoolie/bokeh | bokeh/util/tests/test_serialization.py | 1 | 4853 | from __future__ import absolute_import
import base64
import pytest
import numpy as np
import pandas as pd
import bokeh.util.serialization as bus
def test_id():
assert len(bus.make_id()) == 36
assert isinstance(bus.make_id(), str)
def test_id_with_simple_ids():
import os
os.environ["BOKEH_SIMPLE_IDS"] = "yes"
assert bus.make_id() == "1001"
assert bus.make_id() == "1002"
del os.environ["BOKEH_SIMPLE_IDS"]
testing = [[float('nan'), 3], [float('-inf'), [float('inf')]]]
expected = [['NaN', 3.0], ['-Infinity', ['Infinity']]]
def test_traverse_return_valid_json():
assert bus.traverse_data(testing) == expected
def test_traverse_with_numpy():
assert bus.traverse_data(testing, True) == expected
def test_traverse_without_numpy():
assert bus.traverse_data(testing, False) == expected
def test_transform_array_force_list_default():
dt_ok = bus.BINARY_ARRAY_TYPES
for dt in dt_ok:
a = np.empty(shape=10, dtype=dt)
out = bus.transform_array(a)
assert isinstance(out, dict)
def test_transform_array_force_list_true():
dt_ok = bus.BINARY_ARRAY_TYPES
for dt in dt_ok:
a = np.empty(shape=10, dtype=dt)
out = bus.transform_array(a, force_list=True)
assert isinstance(out, list)
def test_transform_series_force_list_default():
# default int seems to be int64
df = pd.Series([1, 3, 5, 6, 8])
out = bus.transform_series(df)
assert isinstance(out, list)
df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
out = bus.transform_series(df)
assert isinstance(out, dict)
df = pd.Series([1.0, 3, 5, 6, 8])
out = bus.transform_series(df)
assert isinstance(out, dict)
df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
out = bus.transform_series(df)
assert isinstance(out, dict)
def test_transform_series_force_list_true():
df = pd.Series([1, 3, 5, 6, 8])
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
df = pd.Series([1.0, 3, 5, 6, 8])
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
def test_transform_array_to_list():
dt_ok = bus.BINARY_ARRAY_TYPES
for dt in dt_ok:
a = np.empty(shape=10, dtype=dt)
out = bus.transform_array_to_list(a)
assert isinstance(out, list)
@pytest.mark.parametrize('values', [(['cat', 'dog']), ([1.2, 'apple'])])
def test_transform_array_with_nans_to_list(values):
s = pd.Series([np.nan, values[0], values[1]])
out = bus.transform_array_to_list(s)
assert isinstance(out, list)
assert out == ['NaN', values[0], values[1]]
def test_array_encoding_disabled_by_dtype():
assert len(bus.BINARY_ARRAY_TYPES) > 0
dt_ok = bus.BINARY_ARRAY_TYPES
dt_bad = set(np.dtype(x) for x in set(np.typeDict.values()) - set([np.void])) - dt_ok
for dt in dt_ok:
a = np.empty(shape=10, dtype=dt)
assert not bus.array_encoding_disabled(a)
for dt in dt_bad:
a = np.empty(shape=10, dtype=dt)
assert bus.array_encoding_disabled(a)
def test_encode_base64_dict():
for dt in [np.float32, np.float64, np.int64]:
for shape in [(12,), (2, 6), (2,2,3)]:
a = np.arange(12, dtype=dt)
a.reshape(shape)
d = bus.encode_base64_dict(a)
assert 'shape' in d
assert d['shape'] == a.shape
assert 'dtype' in d
assert d['dtype'] == a.dtype.name
assert '__ndarray__' in d
b64 = base64.b64decode(d['__ndarray__'])
aa = np.fromstring(b64, dtype=d['dtype'])
assert np.array_equal(a, aa)
def test_decode_base64_dict():
for dt in [np.float32, np.float64, np.int64]:
for shape in [(12,), (2, 6), (2,2,3)]:
a = np.arange(12, dtype=dt)
a.reshape(shape)
data = base64.b64encode(a).decode('utf-8')
d = {
'__ndarray__' : data,
'dtype' : a.dtype.name,
'shape' : a.shape
}
aa = bus.decode_base64_dict(d)
assert aa.shape == a.shape
assert aa.dtype.name == a.dtype.name
assert np.array_equal(a, aa)
def test_encode_decode_roundtrip():
for dt in [np.float32, np.float64, np.int64]:
for shape in [(12,), (2, 6), (2,2,3)]:
a = np.arange(12, dtype=dt)
a.reshape(shape)
d = bus.encode_base64_dict(a)
aa = bus.decode_base64_dict(d)
assert np.array_equal(a, aa)
| bsd-3-clause |
EnergyExemplarNorthAmerica/Python-PLEXOS-API | Solution Files/aggregate_by_category.py | 1 | 3381 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 22 22:55:30 2019
@author: Steven.Broad
"""
# standard Python/SciPy libraries
import os
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
# Python .NET interface
from dotnet.seamless import add_assemblies, load_assembly
# load PLEXOS assemblies... replace the path below with the installation
# installation folder for your PLEXOS installation.
add_assemblies('C:/Program Files (x86)/Energy Exemplar/PLEXOS 7.5/')
load_assembly('PLEXOS7_NET.Core')
load_assembly('EEUTILITY')
# Import from .NET assemblies (both PLEXOS and system)
from PLEXOS7_NET.Core import *
from EEUTILITY.Enums import *
from System import *
# Create a PLEXOS solution file object and load the solution
sol = Solution()
sol_file = 'Model Q2 Week1 DA Solution.zip' # replace with your solution file
if not os.path.exists(sol_file):
    print('No such file')
else:
sol.Connection(sol_file)
'''
Simple query: works similarly to PLEXOS Solution Viewer
Recordset Query(
SimulationPhaseEnum SimulationPhaseId,
CollectionEnum CollectionId,
String ParentName,
String ChildName,
PeriodEnum PeriodTypeId,
SeriesTypeEnum SeriesTypeId,
String PropertyList[ = None],
Object DateFrom[ = None],
Object DateTo[ = None],
String TimesliceList[ = None],
String SampleList[ = None],
String ModelName[ = None],
AggregationEnum AggregationType[ = None],
String Category[ = None],
String Filter[ = None]
)
'''
# Setup and run the query
# a. Alias the Query method with the arguments you plan to use.
query = sol.Query[SimulationPhaseEnum,CollectionEnum,String,String, \
PeriodEnum, SeriesTypeEnum, String, Object, Object, \
String, String, String, AggregationEnum, String, \
String]
# b. Construct a tuple of values to send as parameters.
params = (SimulationPhaseEnum.STSchedule, \
CollectionEnum.SystemGenerators, \
'', \
'', \
PeriodEnum.Interval, \
SeriesTypeEnum.Values, \
'1', \
DateTime.Parse('4/1/2024'), \
DateTime.Parse('4/1/2024'), \
'0', \
'', \
'', \
AggregationEnum.Category, \
'', \
'')
# c. Use the __invoke__ method of the alias to call the method.
results = query.__invoke__(params)
# Check to see if the query had results
if results is None or results.EOF:
    print('No results')
else:
# Create a DataFrame with a column for each column in the results
cols = [x.Name for x in results.Fields]
names = cols[cols.index('phase_name')+1:]
df = pd.DataFrame(columns=cols)
# loop through the recordset
idx = 0
while not results.EOF:
df.loc[idx] = [datetime(x.Value.Year,x.Value.Month,x.Value.Day,x.Value.Hour,x.Value.Minute,0) if str(type(x.Value)) == 'System.DateTime' else x.Value for x in results.Fields]
idx += 1
results.MoveNext() #VERY IMPORTANT
wb = pd.ExcelWriter('query_by_category.xlsx')
df.to_excel(wb, 'Query') # 'Query' is the name of the worksheet
wb.save()
| gpl-3.0 |
dingmingliu/quanttrade | quanttrade/test/ma_cross.py | 1 | 4991 | __author__ = 'tyler'
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas.io.data import DataReader
from backtest import Strategy, Portfolio
class MovingAverageCrossStrategy(Strategy):
def __init__(self, symbol, bars, short_window=100, long_window=400):
self.symbol = symbol
self.bars = bars
self.short_window = short_window
self.long_window = long_window
def generate_signals(self):
"""Returns the DataFrame of symbols containing the signals
to go long, short or hold (1, -1 or 0)."""
signals = pd.DataFrame(index=self.bars.index)
signals['signal'] = 0.0
# Create the set of short and long simple moving averages over the
# respective periods
        signals['short_mavg'] = pd.rolling_mean(self.bars['Close'], self.short_window, min_periods=1)
        signals['long_mavg'] = pd.rolling_mean(self.bars['Close'], self.long_window, min_periods=1)
# Create a 'signal' (invested or not invested) when the short moving average crosses the long
# moving average, but only for the period greater than the shortest moving average window
signals['signal'][self.short_window:] = np.where(signals['short_mavg'][self.short_window:]
> signals['long_mavg'][self.short_window:], 1.0, 0.0)
# Take the difference of the signals in order to generate actual trading orders
signals['positions'] = signals['signal'].diff()
return signals
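# Reading the frame returned above (illustrative; exact values depend on the data):
#   signals['signal']    is 1.0 while short_mavg > long_mavg, else 0.0
#   signals['positions'] is +1.0 on the bar where the short MA crosses above the
#                        long MA (enter long), -1.0 on the cross back below (exit),
#                        and 0.0 otherwise (hold).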
class MarketOnClosePortfolio(Portfolio):
"""Encapsulates the notion of a portfolio of positions based
on a set of signals as provided by a Strategy.
Requires:
symbol - A stock symbol which forms the basis of the portfolio.
bars - A DataFrame of bars for a symbol set.
signals - A pandas DataFrame of signals (1, 0, -1) for each symbol.
initial_capital - The amount in cash at the start of the portfolio."""
def __init__(self, symbol, bars, signals, initial_capital=100000.0):
self.symbol = symbol
self.bars = bars
self.signals = signals
self.initial_capital = float(initial_capital)
self.positions = self.generate_positions()
def generate_positions(self):
positions = pd.DataFrame(index=signals.index).fillna(0.0)
positions[self.symbol] = 100*signals['signal'] # This strategy buys 100 shares
return positions
def backtest_portfolio(self):
portfolio = self.positions*self.bars['Close']
pos_diff = self.positions.diff()
portfolio['holdings'] = (self.positions*self.bars['Close']).sum(axis=1)
portfolio['cash'] = self.initial_capital - (pos_diff*self.bars['Close']).sum(axis=1).cumsum()
portfolio['total'] = portfolio['cash'] + portfolio['holdings']
portfolio['returns'] = portfolio['total'].pct_change()
return portfolio
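# Column-by-column sketch of the frame returned by backtest_portfolio (descriptive
# only; actual numbers depend on the downloaded price series):
#   holdings = market value of the 100-share blocks currently held (positions * Close)
#   cash     = initial_capital minus the cumulative cost of the trades (pos_diff * Close)
#   total    = cash + holdings
#   returns  = bar-to-bar percentage change of 'total'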
if __name__ == "__main__":
# Obtain daily bars of AAPL from Yahoo Finance for the period
# 1st Jan 1990 to 1st Jan 2002 - This is an example from ZipLine
symbol = 'AAPL'
bars = DataReader(symbol, "yahoo", datetime.datetime(2013,1,1), datetime.datetime(2015,1,1))
# Create a Moving Average Cross Strategy instance with a short moving
# average window of 100 days and a long window of 400 days
mac = MovingAverageCrossStrategy(symbol, bars, short_window=100, long_window=400)
signals = mac.generate_signals()
# Create a portfolio of AAPL, with $100,000 initial capital
portfolio = MarketOnClosePortfolio(symbol, bars, signals, initial_capital=100000.0)
returns = portfolio.backtest_portfolio()
# Plot two charts to assess trades and equity curve
fig = plt.figure()
fig.patch.set_facecolor('white') # Set the outer colour to white
ax1 = fig.add_subplot(211, ylabel='Price in $')
# Plot the AAPL closing price overlaid with the moving averages
bars['Close'].plot(ax=ax1, color='r', lw=2.)
signals[['short_mavg', 'long_mavg']].plot(ax=ax1, lw=2.)
# Plot the "buy" trades against AAPL
ax1.plot(signals.ix[signals.positions == 1.0].index,
signals.short_mavg[signals.positions == 1.0],
'^', markersize=10, color='m')
# Plot the "sell" trades against AAPL
ax1.plot(signals.ix[signals.positions == -1.0].index,
signals.short_mavg[signals.positions == -1.0],
'v', markersize=10, color='k')
# Plot the equity curve in dollars
ax2 = fig.add_subplot(212, ylabel='Portfolio value in $')
returns['total'].plot(ax=ax2, lw=2.)
# Plot the "buy" and "sell" trades against the equity curve
ax2.plot(returns.ix[signals.positions == 1.0].index,
returns.total[signals.positions == 1.0],
'^', markersize=10, color='m')
ax2.plot(returns.ix[signals.positions == -1.0].index,
returns.total[signals.positions == -1.0],
'v', markersize=10, color='k')
# Plot the figure
fig.show()
| apache-2.0 |
leriomaggio/code-coherence-evaluation-tool | code_comments_coherence/source_code_analysis/admin.py | 1 | 40106 |
from __future__ import division, absolute_import
#-------------------------
# Django framework imports
#-------------------------
from django.contrib import admin
from django.core import urlresolvers
from django.conf import settings
from django.contrib.messages import ERROR
from django.utils.translation import gettext_lazy as _
from django.http import HttpResponse, HttpResponseNotModified, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, render_to_response
from django.template.context import RequestContext
from django.conf.urls import patterns, url
#-------------------------
# Project specific imports
#-------------------------
from .models import SoftwareProject, CodeClass, CodeMethod, AgreementEvaluation, SourceCodeFile
from .models import AgreementEvaluationToBeChecked
from .forms import SoftwareProjectAdminForm, CodeArtifactAdminForm, SourceCodeFileAdminForm
from .forms import InlineCodeArtifactAdminForm, AgreementEvaluationAdminForm
from .forms import AdminFilterSoftwareProject, AdminFilterCodeClass
from .forms import AdminFilterEvaluated, AdminFilterEvaluator, AdminFilterAgreementVote
from .forms import CodeArtifactModifyAdminForm
from .settings import STRONG_DISAGREEMENT, DISAGREEMENT, DONT_KNOW, AGREEMENT, STRONG_AGREEMENT
from .settings import DEFAULT_AGREEMENT, DEFAULT_AGREEMENT_LABEL
from .settings import FURTHER_EVAL
from .code_analysis.utils import ensure_xml_files_folder
#---------------------
# Celery tasks imports
#---------------------
from .tasks import generate_task_data
from .tasks import create_source_code_file_task
from .tasks import analysis_task, mapping_task
from celery import group
#----------------------
# Python STD Lib Import
#----------------------
from datetime import datetime
import os
class SoftwareProjectAdmin(admin.ModelAdmin):
form = SoftwareProjectAdminForm
list_display = ['__str__', 'display_project_url', 'statistics']
# This is automatically switched to default in case of SUPERUSER Admin
# see: `render_change_form` method.
change_form_template = 'admin/change_form_no_save.html'
#====================
# Model Admin actions
#====================
actions = ['generate_code_base', 'generate_code_files']
def generate_code_base(self, request, queryset):
"""Admin Action to start the "code-comments" association task to generate the
code bases of selected projects.
        Please note that any existing code base associated with the project will be deleted and
freshly re-generated as well.
"""
rows_updated = 0
selection_errors = 0
for sw_project in queryset:
if sw_project.has_code_base:
# Delete the existing Code Base
for code_method in sw_project.code_methods.all():
code_method.delete()
for code_class in sw_project.code_classes.all():
code_class.delete()
xml_folder_path, res = ensure_xml_files_folder(sw_project.source_folder_path)
# Group Celery Tasks and start them asynchronously
cb_group = group(mapping_task.s(analyzer_cls, titem) for analyzer_cls, titem in
generate_task_data(sw_project, xml_folder_path))
cb_group.apply_async()
rows_updated += 1
# Check positive cases
if rows_updated:
if rows_updated == 1:
msg = _("The generation of the code base of 1 Project has been started and will be \
completed shortly. Please hold on a few minutes and refresh this page to \
check for updates.")
else:
msg = _("The generation of the code base of %d Projects have been started and \
will be completed shortly. Please hold on a few minutes and \
refresh this page to check for updates.")
self.message_user(request, msg)
# Check possible selection error(s)
if selection_errors:
if selection_errors == 1:
message_bit = _("1 selected Software Project has")
else:
message_bit = _("%d selected Software Projects have")
self.message_user(request, _("%s been ignored since the corresponding Code Base \
was not empty!" % message_bit, ERROR))
generate_code_base.short_description = _("Generate Code Base")
def apply_async_create_source_code_file_tasks(self, sw_project):
""" Asynchronously create `SourceCodeFile` instances for the input `SoftwareProject`
object.
        Asynchronous tasks are made available through Celery
Parameters:
-----------
        sw_project: `SoftwareProject` instance to which generated `SourceCodeFile` objects are
being associated.
"""
for root, dirnames, filenames in os.walk(sw_project.source_folder_path):
for filename in filenames:
if not filename.startswith('.'): # If this is not an Hidden File
src_filepath = os.path.join(root, filename)
name, ext = os.path.splitext(src_filepath)
if ext and ext in sw_project.file_extensions:
create_source_code_file_task.delay(sw_project, src_filepath)
#
def generate_code_files(self, request, queryset):
""" Admin Action to generate `SourceCodeFile` instances based on extracted source files
on the File System (MEDIA_ROOT)
        Please note that any existing SourceCodeFile instances already stored in the db
will be deleted and re-generated from scratch.
"""
rows_updated = 0
selection_errors = 0
for sw_project in queryset:
if sw_project.source_folder_path:
if sw_project.source_files.count() > 0:
# Delete any existing SourceCodeFile instance already saved
for code_file in sw_project.source_files.all():
code_file.delete()
self.apply_async_create_source_code_file_tasks(sw_project)
rows_updated += 1
else:
selection_errors += 1 # Selected Project with no decompressed archive
# Check positive cases
if rows_updated:
if rows_updated == 1:
msg = _("Code Files for 1 Project are being generated. Please hold on a while \
and refresh this page to check for updates.")
else:
msg = _("Code Files for %d Project are being generated. Please hold on a while \
and refresh this page to check for updates.")
self.message_user(request, msg)
# Check possible selection error(s)
if selection_errors:
if selection_errors == 1:
message_bit = _("1 selected Software Project has")
else:
message_bit = _("%d selected Software Projects have")
self.message_user(request, _("%s been ignored since the content of corresponding \
decompressed archive has not been found" % message_bit,
ERROR))
generate_code_files.short_description = _("Fetch Code Files")
#=================================
# Model Admin list_display methods
#=================================
def statistics(self, object):
tag = '''<ul>
<li><b>No. of Code Files :</b> %d</li>
<li><b>No. of Code Classes :</b> %d</li>
<li><b>No. of Code Methods :</b> %d</li>
</ul>''' % (object.source_files.count(), object.code_classes.count(),
object.code_methods.count())
tag += '''
<a href="./%d/view_stats/" target="_blank" > %s </a>
''' % (object.id, _('View Chart'))
return tag
statistics.short_description = _('Project Code Base Statistics')
statistics.allow_tags = True
def display_project_url(self, object):
return '<a href="{url}" title="{name}" target="_blank">{url}</a>'.format(
url=object.project_url, name=str(object))
display_project_url.short_description = _('Project Website')
display_project_url.allow_tags = True
#===============================
# Model Admin methods overriding
#===============================
def get_form(self, request, obj=None, **kwargs):
"""
Customize the fields of the ModelForm by
removing the `src_folder_path` field in
case we this method has been invoked in an
`add_view()` (namely, `obj == None`)
"""
self.exclude = []
self.readonly_fields = []
if not obj:
# this means that we are instantiating an unbounded form
self.exclude.append('src_folder_path')
else:
# the form will be bounded, so the field will be read_only
self.readonly_fields.append('src_folder_path')
return super(SoftwareProjectAdmin, self).get_form(request, obj, **kwargs)
def save_model(self, request, obj, form, change):
"""
TODO: Specify the customization
"""
super(SoftwareProjectAdmin, self).save_model(request, obj, form, change)
# start celery analysis tasks
xml_folder_path, folder_existing = ensure_xml_files_folder(obj.source_folder_path)
if not folder_existing:
# Start the parsing task only if the xml_folder_path has been created for the first
# time invoking the `ensure_xml_files_folder` function
task_group = group(analysis_task.s(analyzer, task_item) for analyzer, task_item in
generate_task_data(obj, xml_folder_path))
task_group.apply_async()
msg = _("The code analysis process is now working in background. "
"Please check in a few moments")
self.message_user(request, msg, "INFO")
def get_actions(self, request):
usr = request.user
if usr.is_superuser or (usr.has_perm('add_codeclass') and usr.has_perm('add_codemethod')):
return super(SoftwareProjectAdmin, self).get_actions(request)
return None
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
"""
This model admin method has been overridden only to automatically restore the
`change_form_template` in case the current user is a superadmin.
In the default case, a staff member user (i.e., `is_staff == True`) cannot save
Project information.
"""
if request.user.is_superuser:
self.change_form_template = None
return super(SoftwareProjectAdmin, self).render_change_form(request, context, add, change,
form_url, obj)
def get_readonly_fields(self, request, obj=None):
""" If current user is not a SuperUser Admin, all forms fields are marked as "readonly"
to avoid possible saving errors.
Note: In addition, the `change_form_template` as well has all the "save" button
sets disabled (see `render_change_form` overridden method).
"""
if not request.user.is_superuser:
self.readonly_fields = ['name', 'version', 'project_url', 'src_folder_path',
'file_extensions', 'src_package_file']
return super(SoftwareProjectAdmin, self).get_readonly_fields(request, obj)
def get_urls(self):
"""
Added two additional view to support Ajax-based actions from the
change_list to register agreement evaluations.
"""
urls = super(SoftwareProjectAdmin, self).get_urls()
my_urls = patterns('',
# url(r'^(?P<project_id>\d+)/view_stats/generate_chart/$',
# self.generate_chart_image, name='project_chart'),
url(r'^(?P<project_id>\d+)/view_stats/$',
self.view_project_stats, name='project_statistics'),
)
return my_urls + urls
# ================================
# Model Admin custom view methods
# =================================
def view_project_stats(self, request, project_id):
import matplotlib.pyplot as plt
import mpld3
project_instance = get_object_or_404(SoftwareProject, id=project_id)
barchart_figure = plt.figure(1, figsize=(6, 6))
xvalues = range(3) # the x locations for the groups
width = 0.5 # the width of the bars
yvalues = [project_instance.source_files.count(),
project_instance.code_classes.count(),
project_instance.code_methods.count()]
plt.title(_(u'Software Project Statistics'))
plt.bar(xvalues, yvalues, width)
barchart_d3 = mpld3.fig_to_html(barchart_figure)
# Generate Pie Chart showing distribution of methods among classes
total_methods_count = project_instance.code_methods.count()
classes = project_instance.code_classes.all()
percentage_values = [cl.methods.count()/total_methods_count for cl in classes]
labels = [cl.class_name for cl in classes]
piechart_figure = plt.figure(2, figsize=(11,11))
plt.title(_(u'Distribution of Methods among classes'))
from numpy.random import random
color_values = random(total_methods_count)
jet_cm = plt.get_cmap('jet')
plt.pie(percentage_values, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90, colors=jet_cm(color_values))
piechart_d3 = mpld3.fig_to_html(piechart_figure)
opts = self.model._meta
return render_to_response('admin/software_project_stats.html',
{'project': project_instance, 'barchart': barchart_d3,
'piechart': piechart_d3,
'opts': opts, 'app_label': self.model._meta.app_label,},
context_instance=RequestContext(request))
# def generate_chart_image(self, request, project_id):
# from matplotlib.pyplot import bar, figure, close
# from matplotlib.backends.backend_agg import FigureCanvasAgg
# import numpy as np
# import mpld3
#
# project_instance = get_object_or_404(SoftwareProject, id=project_id)
# figure = figure(1, figsize=(6,6))
#
# ind = np.arange(3) # the x locations for the groups
# width = 0.35 # the width of the bars
# xvalues = ind+width
# yvalues = [project_instance.source_files.count(),
# project_instance.code_classes.count(),
# project_instance.code_methods.count()]
#
# bar(xvalues, yvalues, width)
# # title('Raining Hogs and Dogs', bbox={'facecolor': '0.8', 'pad': 5})
#
# canvas = FigureCanvasAgg(figure)
# response = HttpResponse(content_type='image/jpg')
# canvas.print_jpg(response)
# close(figure)
# return response
class CodeArtifactAdmin(admin.ModelAdmin):
list_filter = ['project']
readonly_fields = ['file_path']
change_list_template = "admin/change_list_extra_head.html"
#=================================
# Model Admin list_display methods
#=================================
def offset(self, object):
return '<span>(%d - %d)<br><strong>%d Lines</strong></span>' % (
object.start_line, object.end_line, (object.end_line - object.start_line)+1)
offset.short_description = _('Lines of Code')
offset.allow_tags = True
def display_code_fragment(self, object):
return object.source_code_fragment
display_code_fragment.short_description = _('Code Fragment')
display_code_fragment.allow_tags = True
def display_code_comment(self, object):
return object.source_code_comment
display_code_comment.short_description = _('Code Comment')
display_code_comment.allow_tags = True
def source_code_file(self, object):
filepath = object.file_path
try:
src_code_file = SourceCodeFile.objects.get(filepath__exact=filepath)
change_url = urlresolvers.reverse('admin:source_code_analysis_sourcecodefile_change',
args=(src_code_file.id,))
tag = '<a href="%s#%d" target="_blank" title="Code file for %s method">' \
'Code file</a>' % (change_url, object.start_line, object.display_name)
return tag
except SourceCodeFile.DoesNotExist:
return _('<b>Source File not found in the DB</b>')
source_code_file.short_description = _('Source File')
source_code_file.allow_tags = True
#==============================
# ModelAdmin method overriding
#==============================
def get_readonly_fields(self, request, obj=None):
""" If current user is not a SuperUser Admin, this method adds to the list of
readonly_fields, `start_line` and `end_line` fields too to make this fields
unchangable.
Please note that, in any case, the `change_form` template has been properly
changed to remove the `submit_row` templatetag
"""
readonly_fields = super(CodeArtifactAdmin, self).get_readonly_fields(request, obj)
if not request.user.is_superuser and len(readonly_fields):
readonly_fields.append('start_line')
readonly_fields.append('end_line')
return readonly_fields
class Media:
css = {
"all": (settings.STATIC_URL + 'css/pygments.css',)
}
class InlineCodeMethodAdmin(admin.StackedInline):
model = CodeMethod
form = InlineCodeArtifactAdminForm
readonly_fields = ['method_name', 'project', 'start_line', 'end_line']
fieldsets = (
(None, {
'fields': (('method_name',), ('start_line', 'end_line'), ),
'classes': ('extrapretty',),
}),
(_('Method Code'), {
'fields': ('code_fragment',),
'classes': ('collapse',),
}),
(_('Method Comment'), {
'classes': ('collapse',),
'fields': ('comment',)
}),
)
class Media:
css = {
"all": (settings.STATIC_URL + 'css/pygments.css',)
}
class CodeClassAdmin(CodeArtifactAdmin):
list_display = ['display_name', 'offset', 'project',
'display_code_comment', 'display_methods_count',
'source_code_file']
search_fields = ['class_name']
list_per_page = 100
inlines = [InlineCodeMethodAdmin]
change_form_template = 'admin/change_form_no_save.html'
form = CodeArtifactAdminForm
readonly_fields = CodeArtifactAdmin.readonly_fields + ['class_name']
fieldsets = (
(None, {
'fields': ('file_path', 'class_name', ('start_line', 'end_line'), 'project', ),
'classes': ('extrapretty',),
}),
(_('Class Code'), {
'fields': ('code_fragment',),
'classes': ('collapse',),
}),
(_('Class Comment'), {
'classes': ('collapse',),
'fields': ('comment',)
}),
# (_('Class Parse Tree (in XML)'), {
# 'classes': ('collapse',),
# 'fields': ('xml_tree',)
# }),
)
#=================================
# Model Admin list_display methods
#=================================
def display_name(self, object):
cname = object.class_name
filepath = object.src_filename
tag = '%s<br>@%s' % (cname, filepath)
return tag
display_name.short_description = _('Class')
display_name.allow_tags = True
def display_methods_count(self, object):
methods_count = object.methods.count()
tag = '<b>%s</b>:%d' % (_('Number of Methods'), methods_count)
return tag
display_methods_count.short_description = _('Number of Methods')
display_methods_count.allow_tags = True
#==============================
# ModelAdmin method overriding
#==============================
def get_actions(self, request):
if request.user.is_superuser or request.user.has_perm('delete_codeclass'):
return super(CodeArtifactAdmin, self).get_actions(request)
else:
return None
class Media:
css = {
"all": (settings.STATIC_URL + 'css/pygments.css',)
}
class CodeMethodAdmin(CodeArtifactAdmin):
list_display = ['display_name', 'offset', 'display_code_fragment', 'display_code_comment',
'source_code_file']
search_fields = ['method_name']
list_filter = ['project',]
list_per_page = 10
readonly_fields = CodeArtifactAdmin.readonly_fields + ['method_name', 'project']
#form = CodeArtifactAdminForm
change_form_template = 'admin/change_form_no_save.html'
fieldsets = (
(None, {
'fields': ('file_path', 'method_name', ('start_line', 'end_line'), 'project', ),
'classes': ('extrapretty',),
}),
('Method Code', {
'fields': ('code_fragment',),
# 'classes': ('collapse',),
}),
('Method Comment', {
# 'classes': ('collapse',),
'fields': ('comment',)
}),
# ('Method Parse Tree (in XML)', {
# 'classes': ('collapse',),
# 'fields': ('xml_tree',)
# }),
)
#=================================
# Model Admin list_display methods
#=================================
def display_name(self, object):
fname = object.method_name
        filename = object.code_class.src_filename
        tag = '%s<br>@%s' % (fname, filename)
return tag
display_name.short_description = _('Method')
display_name.allow_tags = True
#==============================
# ModelAdmin method overriding
#==============================
def get_actions(self, request):
if request.user.is_superuser or request.user.has_perm('delete_codemethod'):
return super(CodeArtifactAdmin, self).get_actions(request)
else:
return None
def get_form(self, request, obj=None, **kwargs):
if request.user.is_superuser:
self.form = CodeArtifactModifyAdminForm
else:
self.form = CodeArtifactAdminForm
return super(CodeMethodAdmin, self).get_form(request, obj, **kwargs)
class Media:
css = {
"all": (settings.STATIC_URL + 'css/pygments.css',)
}
class AgreementEvaluationAdmin(admin.ModelAdmin):
list_display = ['code_fragment', 'comment', 'agreement_voting']
list_filter = ['evaluated','reference_method__project',
'reference_method__code_class', 'evaluator',
'agreement_vote',]
search_fields = ['reference_method__method_name', ]
list_per_page = 100
change_list_template = "admin/change_list_filters_on_top.html"
form = AgreementEvaluationAdminForm
readonly_fields = ['evaluator', 'evaluation_datetime', 'last_update']
fieldsets = (
(_('Code-Comments Evaluation'), {
'fields': (('code_fragment', 'code_comment'),
('agreement_vote', 'wrong_association'),
('evaluation_datetime', 'last_update')),
}),
(_('Method Information'), {
'fields': (('method_name', 'start_line', 'end_line',),),
'classes': ('collapse',),
}),
)
#=================================
# Model Admin list_display methods
#=================================
def code_fragment(self, object):
return object.reference_method.source_code_fragment
code_fragment.short_description = _("Code Fragment")
code_fragment.allow_tags = True
def comment(self, object):
return object.reference_method.source_code_comment
comment.short_description = _("Comment")
comment.allow_tags = True
def _code_file_info(self, object):
try:
filepath = object.reference_method.file_path
src_code_file = SourceCodeFile.objects.get(filepath__exact=filepath)
change_url = urlresolvers.reverse('admin:source_code_analysis_sourcecodefile_change',
args=(src_code_file.id,))
            msg = _('Not sure enough? <br><br> Please take a look at the ')
            msg_title = _('Code file for %s method') % object.reference_method.display_name
link_label = _('for the method ')
addendum = '''
<br><br>
<p>
<span>{message}</span>
<a href="{change_url}#{start_line}" target="_blank"
title="{link_title}">
Code file </a>
{link_label}
<span style="font-family: Courier New, Arial, sans-serif;">
{method_name}
</span>
</p>'''.format(message=msg, change_url=change_url, link_title=msg_title,
start_line=object.reference_method.start_line,
link_label=link_label,
method_name=object.reference_method.display_name)
except SourceCodeFile.DoesNotExist:
addendum = ''
evaluation_question = _('What is the agreement rate between this Comment and \
corresponding Method code?')
return addendum, evaluation_question
def _agreement_vote_widget(self, addendum, evaluation_question, object):
        # Use the verbose name of the `wrong_association` model field as the label
        target_field = object._meta.get_field('wrong_association')
        label_value = target_field.verbose_name
selected_keys = {
'question_message': evaluation_question,
'obid': str(object.id), 'stdis': STRONG_DISAGREEMENT, 'dis': DISAGREEMENT,
'dk': DONT_KNOW, 'agr': AGREEMENT, 'stagr': STRONG_AGREEMENT,
'label': label_value, 'addendum': addendum, 'default': DEFAULT_AGREEMENT_LABEL,
'id_-1': '', 'id_0': '', 'id_1': '', 'id_2': '', 'id_3': '', 'id_4': '',
'checked': 'checked' if object.wrong_association else ''}
if object.agreement_vote != DEFAULT_AGREEMENT:
selected_keys.update({'id_' + str(object.agreement_vote): 'selected="selected"'})
else:
selected_keys.update({'id_-1': 'selected="selected"'})
# TODO: This could be easily fixed with a Django Form Instance (maybe)
tag = '''<div class="agreement_rate">
<p>
{question_message}
</p>
<select id="id_agreement_vote-{obid}" name="{obid}">
<option value="-1" {id_-1}>{default}</option>
<option value="0" {id_0}>{stdis}</option>
<option value="1" {id_1}>{dis}</option>
<option value="2" {id_2}>{dk}</option>
<option value="3" {id_3}>{agr}</option>
<option value="4" {id_4}>{stagr}</option>
</select>
<br>
<br>
<label for="id_form-wrong_association-{obid}"><b>{label}:</b></label>
<input id="id_form-wrong_association-{obid}" name="wrong_association-{obid}"
type="checkbox" {checked}>
<br>
{addendum}
</div>
'''.format(**selected_keys)
return tag
def agreement_voting(self, object):
"""
This method shows ...
"""
#TODO Complete Method doc
addendum, evaluation_question = self._code_file_info(object)
return self._agreement_vote_widget(addendum, evaluation_question, object)
agreement_voting.short_description = _('Agreement')
agreement_voting.allow_tags = True
#===============================
# Model Admin methods overriding
#===============================
def queryset(self, request):
"""
        Return a different `AgreementEvaluation` queryset depending on the
        privileges of `request.user`: superusers get the default queryset,
        while regular evaluators only get the evaluations assigned to them,
        excluding the ones marked as "to be checked".
"""
if request.user.is_superuser:
return super(AgreementEvaluationAdmin, self).queryset(request)
request_user = request.user
        # Use the RelatedManager; exclude the "to be checked" evaluations (vote == 5),
        # which are handled by AgreementEvaluationToBeCheckedAdmin.
        return request_user.evaluations.exclude(agreement_vote=5)
def get_actions(self, request):
"""If the current user is not a Superuser, no action will be allowed"""
if request.user.is_superuser or request.user.has_perm('delete_agreementevaluation'):
return super(AgreementEvaluationAdmin, self).get_actions(request)
else:
return None
def get_urls(self):
"""
        Add two extra views to support the Ajax-based actions issued from the
        change_list to register agreement evaluations.
"""
urls = super(AgreementEvaluationAdmin, self).get_urls()
my_urls = patterns('',
url(r'^(?P<evaluation_id>\d+)/agreement/$',
self.change_agreement_evaluation, name='ajax_change_evaluation'),
url(r'^(?P<evaluation_id>\d+)/wrong-association/$',
self.mark_wrong_association, name='ajax_mark_wrong_association'),
)
return my_urls + urls
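    # Illustrative mapping of the extra URLs above (relative to this ModelAdmin's
    # change list); they are meant to be called via Ajax POSTs, presumably issued
    # by the static/js/admin_agreement_eval.js script listed in the Media class:
    #   .../<evaluation_id>/agreement/          -> change_agreement_evaluation
    #   .../<evaluation_id>/wrong-association/  -> mark_wrong_association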
def changelist_view(self, request, extra_context=None):
"""
"""
q = request.GET.copy()
# Remove Empty values to avoid errors in search (queries)
        for key in ('reference_method__project', 'reference_method__code_class',
                    'evaluated', 'evaluator', 'agreement_vote'):
            if key in q and q[key] == '':
                q.pop(key)
request.GET = q
request.META['QUERY_STRING'] = request.GET.urlencode()
# Set `filter_formset`
filter_formset = list()
# Check if this filters make sense:
# If only one instance of Project and/or Class is stored in the DB, the filter
# does not make any sense! :)
if SoftwareProject.objects.count() > 1:
filter_formset.append(AdminFilterSoftwareProject(request))
if CodeClass.objects.count() > 1:
filter_formset.append(AdminFilterCodeClass(request))
filter_formset.append(AdminFilterEvaluated(request))
filter_formset.append(AdminFilterAgreementVote(request))
# At last, add the filter based on evaluators in case current user is a superuser
if request.user.is_superuser:
filter_formset.append(AdminFilterEvaluator(request))
new_context = {'filter_formset': filter_formset}
new_context.update(extra_context or {})
return super(AgreementEvaluationAdmin, self).changelist_view(request,
extra_context=new_context)
#================================
# Model Admin custom view methods
#================================
def change_agreement_evaluation(self, request, evaluation_id):
"""
TODO
"""
# TODO: Complete Documentation
        if request.is_ajax() and request.method == 'POST' and request.user.is_staff:
            agreement_rate = request.POST.get('evaluation', None)
            if agreement_rate is None:
                return HttpResponseBadRequest(content='KO')
            agreement_rate = int(agreement_rate)
agreement_eval = get_object_or_404(AgreementEvaluation, pk=evaluation_id)
if agreement_eval.agreement_vote != agreement_rate:
agreement_eval.agreement_vote = agreement_rate
update_fields = ['agreement_vote']
if agreement_rate != DEFAULT_AGREEMENT:
agreement_eval.evaluation_datetime = datetime.now()
update_fields.append('evaluation_datetime')
agreement_eval.save(update_fields=update_fields)
return HttpResponse(content='OK')
return HttpResponseNotModified()
return HttpResponseBadRequest(content='KO')
def mark_wrong_association(self, request, evaluation_id):
"""
TODO
"""
# TODO: Complete Documentation
        if request.is_ajax() and request.method == 'POST' and request.user.is_staff:
wrong_association_value = request.POST.get('wrong', None)
if wrong_association_value is None:
return HttpResponseBadRequest(content='KO')
wrong_association_value = bool(int(wrong_association_value))
agreement_eval = get_object_or_404(AgreementEvaluation, pk=evaluation_id)
if agreement_eval.wrong_association != wrong_association_value:
agreement_eval.wrong_association = wrong_association_value
agreement_eval.agreement_vote = DEFAULT_AGREEMENT
agreement_eval.evaluation_datetime = datetime.now()
agreement_eval.save(update_fields=['agreement_vote', 'evaluation_datetime',
'wrong_association'])
return HttpResponse(content='OK')
return HttpResponseNotModified()
return HttpResponseBadRequest(content='KO')
class Media:
css = {
"all": (settings.STATIC_URL + 'css/pygments.css',)
}
js = [
settings.STATIC_URL + 'js/admin_agreement_eval.js',
]
class AgreementEvaluationToBeCheckedAdmin(AgreementEvaluationAdmin):
"""
    Specialized version of `AgreementEvaluationAdmin` restricted to the
    "to be checked" `AgreementEvaluation` instances, i.e. those marked for
    further evaluation.
"""
list_filter = ['reference_method__project', 'reference_method__code_class', 'evaluator',]
def _agreement_vote_widget(self, addendum, evaluation_question, object):
        # Use the verbose name of the `wrong_association` model field as the label
        target_field = object._meta.get_field('wrong_association')
        label_value = target_field.verbose_name
selected_keys = {
'question_message': evaluation_question,
'obid': str(object.id), 'stdis': STRONG_DISAGREEMENT, 'dis': DISAGREEMENT,
'dk': DONT_KNOW, 'agr': AGREEMENT, 'stagr': STRONG_AGREEMENT,
'label': label_value, 'addendum': addendum, 'default': DEFAULT_AGREEMENT_LABEL,
'sttbc': FURTHER_EVAL,
            'id_-1': '', 'id_0': '', 'id_1': '', 'id_2': '', 'id_3': '', 'id_4': '', 'id_5': '',
'checked': 'checked' if object.wrong_association else ''}
if object.agreement_vote != DEFAULT_AGREEMENT:
selected_keys.update({'id_' + str(object.agreement_vote): 'selected="selected"'})
else:
selected_keys.update({'id_-1': 'selected="selected"'})
# TODO: This could be easily fixed with a Django Form Instance (maybe)
tag = '''<div class="agreement_rate">
<p>
{question_message}
</p>
<select id="id_agreement_vote-{obid}" name="{obid}">
<option value="-1" {id_-1}>{default}</option>
<option value="0" {id_0}>{stdis}</option>
<option value="1" {id_1}>{dis}</option>
<option value="2" {id_2}>{dk}</option>
<option value="3" {id_3}>{agr}</option>
<option value="4" {id_4}>{stagr}</option>
<option value="5" {id_5}>{sttbc}</option>
</select>
<br>
<br>
<label for="id_form-wrong_association-{obid}"><b>{label}:</b></label>
<input id="id_form-wrong_association-{obid}" name="wrong_association-{obid}"
type="checkbox" {checked}>
<br>
{addendum}
</div>
'''.format(**selected_keys)
return tag
    #===============================
# Model Admin methods overriding
#===============================
def queryset(self, request):
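        """
        Return only the "to be checked" evaluations (agreement_vote == 5),
        restricted to the current evaluator unless the user is a superuser.
        """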
if request.user.is_superuser:
qs = AgreementEvaluation.objects.all()
else:
qs = request.user.evaluations.all()
return qs.filter(agreement_vote=5)
def changelist_view(self, request, extra_context=None):
"""
"""
q = request.GET.copy()
# Remove Empty values to avoid errors in search (queries)
        for key in ('reference_method__project', 'reference_method__code_class', 'evaluator'):
            if key in q and q[key] == '':
                q.pop(key)
request.GET = q
request.META['QUERY_STRING'] = request.GET.urlencode()
# Set `filter_formset`
filter_formset = list()
# Check if this filters make sense:
# If only one instance of Project and/or Class is stored in the DB, the filter
# does not make any sense! :)
if SoftwareProject.objects.count() > 1:
filter_formset.append(AdminFilterSoftwareProject(request))
if CodeClass.objects.count() > 1:
filter_formset.append(AdminFilterCodeClass(request))
# At last, add the filter based on evaluators in case current user is a superuser
if request.user.is_superuser:
filter_formset.append(AdminFilterEvaluator(request))
new_context = {'filter_formset': filter_formset}
new_context.update(extra_context or {})
return super(AgreementEvaluationToBeCheckedAdmin, self).changelist_view(request,
extra_context=new_context)
class SourceCodeFileAdmin(admin.ModelAdmin):
readonly_fields = ['filepath']
exclude = ['project', 'filepath']
list_display = ['filepath', 'project']
list_filter = ['project']
form = SourceCodeFileAdminForm
change_form_template = 'admin/change_form_no_save.html'
class Media:
css = {
"all": (settings.STATIC_URL + 'css/pygments.css',)
}
admin.site.register(SoftwareProject, SoftwareProjectAdmin)
admin.site.register(CodeClass, CodeClassAdmin)
admin.site.register(CodeMethod, CodeMethodAdmin)
admin.site.register(AgreementEvaluation, AgreementEvaluationAdmin)
admin.site.register(SourceCodeFile, SourceCodeFileAdmin)
admin.site.register(AgreementEvaluationToBeChecked, AgreementEvaluationToBeCheckedAdmin) | bsd-3-clause |
Haleyo/spark-tk | regression-tests/sparktkregtests/testcases/frames/frame_matrix_datatype_test.py | 11 | 8964 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests matrix datatype on frames """
import unittest
import numpy
from itertools import ifilter, imap
from sparktkregtests.lib import sparktk_test
from sparktk.dtypes import matrix, vector
class FrameMatrixDataTypeTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build frames to be exercised and establish known baselines"""
super(FrameMatrixDataTypeTest, self).setUp()
self.dataset = [["A", [[1,2],[3,4]]], ["B", [[5,6],[7,8]]], ["C", [[9,10],[11,12],[13,14]]]]
self.schema = [("C0", str), ("C1", matrix)]
def test_frame_create_row_count(self):
""" Trivial Frame creation. """
frame = self.context.frame.create(self.dataset,
schema=self.schema)
self.assertEqual(frame.count(), len(self.dataset))
self.assertEqual(len(frame.take(3)), 3)
# test to see if taking more rows than exist still
# returns only the right number of rows
self.assertEqual(len(frame.take(10)), len(self.dataset))
@unittest.skip("sparktk: schema inference between matrix and vector is ambiguous")
def test_without_schema(self):
"""Test without a specified schema"""
frame = self.context.frame.create(self.dataset)
self.assertEqual(frame.schema, self.schema)
@unittest.skip("sparktk: schema inference between matrix and vector is ambiguous")
def test_with_validate_schema_no_schema_provided(self):
"""Test without a specified schema validating the schema"""
frame = self.context.frame.create(self.dataset, validate_schema=True)
self.assertEqual(frame.schema, self.schema)
def test_with_validate_schema_with_valid_schema(self):
"""Test with validate_schema true and also a valid schema"""
# should default to using the defined schema
frame = self.context.frame.create(self.dataset,
validate_schema=True,
schema=self.schema)
self.assertEqual(frame.schema, self.schema)
def test_validate_schema_with_invalid_schema_all_columns_same_datatype(self):
"""Test with validate_schema=True and invalid schema, columns same type"""
invalid_schema = [("col1", int), ("col2", int)]
validated_frame = self.context.frame.create(self.dataset,
validate_schema=True,
schema=invalid_schema)
for row in validated_frame.take(validated_frame.count()):
for item in row:
if type(item) is not int:
self.assertEqual(item, None)
def test_validate_schema_of_strs(self):
"""Test validate schema true with schema of strs"""
schema = [("C0", str), ("C1", str)]
# should not throw an exception
# if the datatype can be cast to the schema-specified
# datatype validate schema should just cast it
# since ints and floats can be cast to string
# it should not error but should cast all of the data to strings
frame = self.context.frame.create(self.dataset, schema=schema, validate_schema=True)
for row in frame.take(frame.count()):
# the data should all be cast to str by validate_schema=True
for item in row:
self.assertEqual(type(item), str)
def test_add_columns(self):
"""Test add columns on matrix column data"""
frame = self.context.frame.create(self.dataset, self.schema)
# Add the number of rows of the matrix as a column named shape
frame.add_columns(lambda row: row["C1"].shape[0], ('shape', int))
obtained_result = frame.take(10, columns='shape')
expected_result = [[numpy.array(item[1]).shape[0]] for item in self.dataset]
self.assertEqual(obtained_result, expected_result)
def test_filter(self):
"""Test filter on matrix column data"""
frame = self.context.frame.create(self.dataset, self.schema)
# Get number of rows in each matrix from shape of the underlying ndarray
frame.filter(lambda row: row["C1"].shape[0] == 2)
obtained_result = frame.count()
obtained_result_matrix = frame.take(10, columns='C1')
# Get expected result by converting the actual dataset to ndarray and testing the same condition
filtered_result_matrix = list(ifilter(lambda i: numpy.array(i[1]).shape[0] == 2, self.dataset))
expected_result_matrix = list(imap(lambda row: [numpy.array(row[1])], filtered_result_matrix))
expected_result = len(expected_result_matrix)
self.assertEqual(obtained_result, expected_result)
numpy.testing.assert_array_equal(obtained_result_matrix, expected_result_matrix)
def test_convert_matrix_col_to_vector(self):
""" Convert a matrix column to vector using add_columns"""
frame = self.context.frame.create(self.dataset, self.schema)
        # Keep only the matrices with exactly 2 rows, since the vector column added
        # below is declared as vector(2) and therefore needs a fixed length of 2
frame.filter(lambda row: row["C1"].shape[0] == 2)
# Add first column of each matrix as a new column with vector data type
frame.add_columns(lambda row: row["C1"][:,0], ('first_column', vector(2)))
obtained_result = frame.take(10, columns='first_column')
        # Convert the first 2 elements of the dataset to numpy arrays and take the first column
expected_result = [[numpy.array(item[1])[:,0]] for item in self.dataset[:2]]
numpy.testing.assert_array_equal(obtained_result, expected_result)
def test_covariance_matrix(self):
"""Test the output of dicom_covariance_matrix"""
frame = self.context.frame.create(self.dataset, self.schema)
frame.matrix_covariance_matrix("C1")
results = frame.to_pandas(frame.count())
#compare result
for i, row in results.iterrows():
actual_cov = row['CovarianceMatrix_C1']
            # expected output using numpy's covariance method
expected_cov = numpy.cov(row['C1'])
numpy.testing.assert_almost_equal(
actual_cov, expected_cov,
decimal=4, err_msg="cov incorrect")
def test_matrix_svd(self):
""" Test matrix svd operation on the frame"""
frame = self.context.frame.create(self.dataset, self.schema)
frame.matrix_svd("C1")
#compare matrix_svd output with numpy's svd
results = frame.to_pandas(frame.count())
for i, row in results.iterrows():
actual_U = row['U_C1']
actual_V = row['Vt_C1']
actual_s = row['SingularVectors_C1']
            # expected output using numpy's svd
U, s, V = numpy.linalg.svd(row['C1'])
numpy.testing.assert_almost_equal(
actual_U, U, decimal=4,
err_msg="U incorrect")
numpy.testing.assert_almost_equal(
actual_V, V, decimal=4,
err_msg="V incorrect")
numpy.testing.assert_almost_equal(
actual_s[0], s, decimal=4,
err_msg="Singual vectors incorrect")
def test_matrix_pcs(self):
""" Test matrix pca operation on frame"""
dataset = [["A", [[1,2,3],[3,4,5],[2,6,7]]],
["B", [[5,6,7],[7,8,9],[4,3,5]]],
["C", [[9,10,11],[11,12,13],[13,14,15]]]]
frame = self.context.frame.create(dataset, self.schema)
frame.matrix_svd("C1")
frame.matrix_pca("C1", "Vt_C1")
#compare matrix_pca output with numpy's
results = frame.to_pandas(frame.count())
for i, row in results.iterrows():
actual_pcs = row['PrincipalComponents_C1']
            # expected output using numpy's svd
U, s, V = numpy.linalg.svd(row['C1'])
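            # the expected principal components are the rows of the matrix projected
            # onto numpy's right singular vectors (the rows of V), i.e. C1.dot(V.T)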
expected_pcs = row['C1'].dot(V.T)
numpy.testing.assert_almost_equal(
actual_pcs, expected_pcs, decimal=4,
err_msg="pcs incorrect")
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
almarklein/bokeh | bokeh/cli/utils.py | 1 | 8120 | from __future__ import print_function
from collections import OrderedDict
from six.moves.urllib import request as urllib2
import io
import pandas as pd
from .. import charts
from . import help_messages as hm
def keep_source_input_sync(filepath, callback, start=0):
""" Monitor file at filepath checking for new lines (similar to
tail -f) and calls callback on every new line found.
Args:
filepath (str): path to the series data file (
i.e.: /source/to/my/data.csv)
        callback (callable): function to be called with every new line
            read from the file at filepath, starting at byte start
start (int): specifies where to start reading from the file at
filepath.
Default: 0
Returns:
DataFrame created from data read from filepath
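    Example (illustrative; just prints every new line and runs until interrupted)::
        keep_source_input_sync('/source/to/my/data.csv', callback=print)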
"""
if filepath is None:
msg = "No Input! Please specify --source_filename or --buffer t"
raise IOError(msg)
if filepath.lower().startswith('http'):
# Create a request for the given URL.
while True:
request = urllib2.Request(filepath)
data = get_data_from_url(request, start)
f = io.BytesIO(data)
f.seek(start)
line = f.readline() # See note below
if not line:
continue # No data, try again
callback(line)
start = len(data)
else:
f = open(filepath, 'r')
f.seek(start)
while True:
line = f.readline() # See note below
if not line:
continue # No data, try again
callback(line)
source = pd.read_csv(filepath)
return source
# Try to get the response. This will raise a urllib2.URLError if there is a
# problem (e.g., invalid URL).
# Reference:
# - http://stackoverflow.com/questions/5209087/python-seek-in-http-response-stream
# - http://stackoverflow.com/questions/1971240/python-seek-on-remote-file-using-http
def get_data_from_url(request, start=0, length=0):
""" Read from request after adding headers to retrieve data from byte
specified in start.
request (urllib2.Request): request object related to the data to read
start (int, optional): byte to start reading from.
Default: 0
length: length of the data range to read from start. If 0 it reads
until the end of the stream.
Default: 0
Returns:
String read from request
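    Example (illustrative; example.com is a placeholder URL)::
        request = urllib2.Request('http://example.com/data.csv')
        chunk = get_data_from_url(request, start=100, length=50)
        # sends "Range: bytes=100-149"; servers honouring it reply with a
        # Content-Range header such as "Content-Range: bytes 100-149/2048"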
"""
ranged = False
# Add the header to specify the range to download.
if start and length:
request.add_header("Range", "bytes=%d-%d" % (start, start + length - 1))
elif start:
request.add_header("Range", "bytes=%s-" % start)
response = urllib2.urlopen(request)
# If a content-range header is present, partial retrieval worked.
if "content-range" in response.headers:
print("Partial retrieval successful.")
# The header contains the string 'bytes', followed by a space, then the
# range in the format 'start-end', followed by a slash and then the total
        # size of the page (or an asterisk if the total size is unknown). Let's get
# the range and total size from this.
_range, total = response.headers['content-range'].split(' ')[-1].split('/')
# Print a message giving the range information.
if total == '*':
print("Bytes %s of an unknown total were retrieved." % _range)
else:
print("Bytes %s of a total of %s were retrieved." % (_range, total))
# # No header, so partial retrieval was unsuccessful.
# else:
# print "Unable to use partial retrieval."
data = response.read()
return data
def parse_output_config(output):
"""Parse the output specification string and return the related chart
output attribute.
    Args:
        output (str): output specification string for the cli output option,
            following the convention: <output_type>://<type_arg>
Valid values:
output_type: file or server
type_arg:
file_path if output_type is file
serve path if output_type is server
Returns:
dictionary containing the output arguments to pass to a chart object
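    Example (illustrative, with made-up paths and names)::
        parse_output_config('file://output/chart.html')
        # -> {'filename': 'output/chart.html'}
        parse_output_config('server://my_chart')
        # -> {'server': 'my_chart'}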
"""
output_type, output_options = output.split('://')
if output_type == 'file':
return {'filename': output_options}
elif output_type == 'server':
# TODO: check if server configuration is as flexible as with plotting
# interface and add support for url/name if so.
out_opt = output_options.split("@")
attrnames = ['server', 'url', 'name']
        # unpack the server output parameters in order to pass them to the plot
        # creation function
        kws = dict(zip(attrnames, out_opt))
return {'server': kws['server']}
else:
msg = "Unknown output type %s found. Please use: file|server"
print (msg % output_type)
return {}
def get_chart_params(title, output, show_legend=False):
"""Parse output type and output options and return related chart
    parameters, e.g. the filename if output_type is `file` or the server
    name if output_type is `server`.
Args:
title (str): the title of your plot.
output (str): selected output. Follows the following convention:
<output_type>://<type_arg> where output_type can be
`file` (in that case type_arg specifies the file path) or
            `server` (in that case type_arg specifies the server name).
Returns:
dictionary containing the arguments to pass to a chart object
related to title and output options
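    Example (illustrative)::
        get_chart_params('My chart', 'file://chart.html', show_legend=True)
        # -> {'title': 'My chart', 'legend': True, 'filename': 'chart.html'}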
"""
params = {'title': title, 'legend': show_legend}
output_params = parse_output_config(output)
if output_params:
params.update(output_params)
return params
def get_data_series(series, source, indexes):
"""Generate an OrderedDict from the source series excluding index
and all series not specified in series.
Args:
        series (str): comma-separated names of the series to keep from
            source (empty or None keeps every non-index column)
source (DataFrame): pandas DataFrame with the data series to be
plotted
        indexes (list(str)): names of the series of source to be used as index.
Returns:
OrderedDict with the data series from source
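    Example (illustrative, with a made-up DataFrame)::
        source = pd.DataFrame({'date': [1, 2, 3], 'price': [10.0, 10.5, 9.8]})
        get_data_series('price', source, ['date'])
        # -> OrderedDict with the 'price' and 'date' columns of `source`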
"""
series = define_series(series, source, indexes)
# generate charts data
data_series = OrderedDict()
for i, colname in enumerate(series+indexes):
try:
data_series[colname] = source[colname]
except KeyError:
raise KeyError(hm.ERR_MSG_SERIES_NOT_FOUND % (colname, source.keys()))
return data_series
def define_series(series, source, indexes):
"""If series is empty returns source_columns excluding the column
where column == index. Otherwise returns the series.split(',')
Args:
series (str): string that contains the names of the
series to keep from source, separated by `,`
source (DataFrame): pandas DataFrame with the data series to be
plotted
        indexes (list(str)): names of the series of source to be used as index.
Returns:
list of the names (as str) of the series except index
"""
if not series:
return [c for c in source.columns if c not in indexes]
else:
return series.split(',')
def get_charts_mapping():
"""Return a dict with chart classes names (lower case) as keys and
their related class as values.
Returns:
dict mapping chart classes names to chart classes
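    Example (illustrative; the available keys depend on what the charts module exposes)::
        mapping = get_charts_mapping()
        # e.g. mapping.get('bar') would be the Bar chart class, if defined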
"""
mapping = {}
for (clsname, cls) in charts.__dict__.items():
try:
# TODO: We may need to restore the objects filtering
# when charts creators (or builders registration) is added
# to the charts API
mapping[clsname.lower()] = cls
except TypeError:
pass
return mapping | bsd-3-clause |