repo_name (string, 7-79 chars) | path (string, 4-179 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 959-798k chars) | license (string, 15 classes)
---|---|---|---|---|---|
zangsir/sms-tools
|
lectures/07-Sinusoidal-plus-residual-model/plots-code/stochasticSynthesisFrame.py
|
24
|
2966
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2+1 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate random phase values
Y = np.zeros(N, dtype = complex)
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.1
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(float(fs)*np.arange(mY.size)/N, mY, 'r', lw=1.5, label="mY")
plt.axis([0, maxFreq, -78, max(mX)+0.5])
plt.title('mY (stochastic approximation of mX)')
plt.subplot(3,1,2)
plt.plot(float(fs)*np.arange(pY.size)/N, pY-np.pi, 'c', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
plt.title('pY (random phases)')
plt.subplot(3,1,3)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('yst')
plt.tight_layout()
plt.savefig('stochasticSynthesisFrame.png')
plt.show()
|
agpl-3.0
|
Clyde-fare/scikit-learn
|
benchmarks/bench_plot_lasso_path.py
|
301
|
4003
|
"""Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat tail in the distribution.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to, since surface plots do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
|
bsd-3-clause
|
a4a881d4/6FSK
|
utils.py
|
1
|
1800
|
import random
import numpy as np
def rsrcBin(L):
r = []
for k in range(L):
r.append(random.randint(0,1))
return r
def rsrc(L):
r = rsrcBin(L)
x = [1-2*x for x in r]
return x
def fftOnce(x):
W = len(x)
hw = np.hamming(W)
ss = np.fft.fft(x*hw)
return np.conj(ss)*ss
def spectrum(x):
W = 1024*32
r = fftOnce(x[:W])
for k in range(W/2,len(x)-W,W/2):
r = r + fftOnce(x[k:k+W])
return r
def xorsum(k):
# XOR (parity) of all bits of k
r = 0
while k:
r = r^(k&1)
k = k>>1
return r
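# Illustrative check of xorsum: xorsum(0b1011) -> 1 (three set bits), xorsum(0b1001) -> 0 (two set bits).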
class mseq:
def __init__(self,poly):
self.p = poly
k=0
while poly!=0:
k = k+1
poly = poly>>1
self.order = k-1
print "M sequence order",k
self.length = (1<<self.order)-1
self.s = []
state = 1
for n in range(self.length):
state = state<<1
if state>self.length:
state = state^self.p
self.s.append(1)
else:
self.s.append(0)
def printSeq(self,x=None):
if x==None:
x = self.s
for k in x:
print k,
print ""
def sum(self):
ss = 0
for x in self.s:
ss = ss + x
return ss
def shift(self,l):
return self.s[l:]+self.s[:l]
class gold:
def __init__(self,p0,p1):
self.m0 = mseq(p0)
self.m1 = mseq(p1)
def seq(self,k0,k1):
s0 = self.m0.shift(k0)
s1 = self.m1.shift(k1)
r = [a^b for (a,b) in zip(s0,s1)]
return r
def toReal(self,s):
return np.array([1-2*x for x in s])
def xcorr(self,x,y):
return np.correlate(np.array(x),np.array(y),'full')
def main():
m = mseq(0x409)
m.printSeq()
y = m.shift(1)
print "shift 1"
m.printSeq(y)
print m.sum()
g = gold(0x409,0x40f)
s = g.toReal(g.seq(1,3))
x = g.xcorr(s,s)
import matplotlib.pyplot as plt
plt.plot(x)
plt.show()
if __name__ == '__main__':
main()
|
gpl-3.0
|
ryandougherty/mwa-capstone
|
MWA_Tools/build/matplotlib/examples/units/evans_test.py
|
3
|
2335
|
"""
A mockup "Foo" units class which supports
conversion and different tick formatting depending on the "unit".
Here the "unit" is just a scalar conversion factor, but this example shows mpl is
entirely agnostic to what kind of units client packages use
"""
import matplotlib
from matplotlib.cbook import iterable
import matplotlib.units as units
import matplotlib.ticker as ticker
from pylab import figure, show
class Foo:
def __init__( self, val, unit=1.0 ):
self.unit = unit
self._val = val * unit
def value( self, unit ):
if unit is None: unit = self.unit
return self._val / unit
class FooConverter:
@staticmethod
def axisinfo(unit, axis):
'return the Foo AxisInfo'
if unit==1.0 or unit==2.0:
return units.AxisInfo(
majloc = ticker.IndexLocator( 8, 0 ),
majfmt = ticker.FormatStrFormatter("VAL: %s"),
label='foo',
)
else:
return None
@staticmethod
def convert(obj, unit, axis):
"""
convert obj using unit. If obj is a sequence, return the
converted sequence
"""
if units.ConversionInterface.is_numlike(obj):
return obj
if iterable(obj):
return [o.value(unit) for o in obj]
else:
return obj.value(unit)
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
if iterable(x):
for thisx in x:
return thisx.unit
else:
return x.unit
units.registry[Foo] = FooConverter()
# create some Foos
x = []
for val in range( 0, 50, 2 ):
x.append( Foo( val, 1.0 ) )
# and some arbitrary y data
y = [i for i in range( len(x) ) ]
# plot specifying units
fig = figure()
fig.suptitle("Custom units")
fig.subplots_adjust(bottom=0.2)
ax = fig.add_subplot(1,2,2)
ax.plot( x, y, 'o', xunits=2.0 )
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_ha('right')
ax.set_title("xunits = 2.0")
# plot without specifying units; will use the None branch for axisinfo
ax = fig.add_subplot(1,2,1)
ax.plot( x, y ) # uses default units
ax.set_title('default units')
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_ha('right')
show()
|
gpl-2.0
|
detrout/debian-statsmodels
|
examples/python/regression_diagnostics.py
|
28
|
2876
|
## Regression diagnostics
# This example file shows how to use a few of the ``statsmodels`` regression diagnostic tests in a real-life context. You can learn about more tests and find out more information about the tests on the [Regression Diagnostics page.](http://statsmodels.sourceforge.net/stable/diagnostic.html)
#
# Note that most of the tests described here only return a tuple of numbers, without any annotation. A full description of outputs is always included in the docstring and in the online ``statsmodels`` documentation. For presentation purposes, we use the ``zip(name, test)`` construct to pretty-print short descriptions in the examples below.
# ## Estimate a regression model
from __future__ import print_function
from statsmodels.compat import lzip
import statsmodels
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
# Load data
url = 'http://vincentarelbundock.github.io/Rdatasets/csv/HistData/Guerry.csv'
dat = pd.read_csv(url)
# Fit regression model (using the natural log of one of the regressors)
results = smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=dat).fit()
# Inspect the results
print(results.summary())
# ## Normality of the residuals
# Jarque-Bera test:
name = ['Jarque-Bera', 'Chi^2 two-tail prob.', 'Skew', 'Kurtosis']
test = sms.jarque_bera(results.resid)
lzip(name, test)
# Omni test:
name = ['Chi^2', 'Two-tail probability']
test = sms.omni_normtest(results.resid)
lzip(name, test)
# ## Influence tests
#
# Once created, an object of class ``OLSInfluence`` holds attributes and methods that allow users to assess the influence of each observation. For example, we can compute and extract the first few rows of DFbetas by:
from statsmodels.stats.outliers_influence import OLSInfluence
test_class = OLSInfluence(results)
test_class.dfbetas[:5,:]
# Explore other options by typing ``dir(test_class)``
#
# Useful information on leverage can also be plotted:
from statsmodels.graphics.regressionplots import plot_leverage_resid2
print(plot_leverage_resid2(results))
# Other plotting options can be found on the [Graphics page.](http://statsmodels.sourceforge.net/stable/graphics.html)
# ## Multicollinearity
#
# Condition number:
np.linalg.cond(results.model.exog)
# ## Heteroskedasticity tests
#
# Breusch-Pagan test:
name = ['Lagrange multiplier statistic', 'p-value',
'f-value', 'f p-value']
test = sms.het_breushpagan(results.resid, results.model.exog)
lzip(name, test)
# Goldfeld-Quandt test
name = ['F statistic', 'p-value']
test = sms.het_goldfeldquandt(results.resid, results.model.exog)
lzip(name, test)
# ## Linearity
#
# Harvey-Collier multiplier test for the null hypothesis that the linear specification is correct:
name = ['t value', 'p value']
test = sms.linear_harvey_collier(results)
lzip(name, test)
|
bsd-3-clause
|
BigTone2009/sms-tools
|
lectures/09-Sound-description/plots-code/knn.py
|
25
|
1718
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import os, sys
from numpy import random
from scipy.stats import mode
def eucDist(vec1, vec2):
return np.sqrt(np.sum(np.power(np.array(vec1) - np.array(vec2), 2)))
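# Quick illustrative check of this helper: eucDist([0, 0], [3, 4]) returns 5.0.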
n = 30
qn = 8
K = 3
class1 = np.transpose(np.array([np.random.normal(-2,2,n), np.random.normal(-2,2,n)]))
class2 = np.transpose(np.array([np.random.normal(2,2,n), np.random.normal(2,2,n)]))
query = np.transpose(np.array([np.random.normal(0,2,qn), np.random.normal(0,2,qn)]))
plt.figure(1, figsize=(9.5, 3.5))
plt.subplot(1,2,1)
plt.scatter(class1[:,0],class1[:,1], c='b', alpha=0.7, s=50, edgecolor='none')
plt.scatter(class2[:,0],class2[:,1], c='r', alpha=0.7, s=50, edgecolor='none')
plt.scatter(query[:,0],query[:,1], c='c', alpha=1, s=50)
predClass = []
for kk in range(query.shape[0]):
dist = []
for pp in range(class1.shape[0]):
euc = eucDist(query[kk,:], class1[pp,:])
dist.append([euc, 1])
for pp in range(class2.shape[0]):
euc = eucDist(query[kk,:], class2[pp,:])
dist.append([euc, 2])
dist = np.array(dist)
indSort = np.argsort(dist[:,0])
topKDist = dist[indSort[:K],1]
predClass.append(mode(topKDist)[0][0].tolist())
predClass = np.array(predClass)
indC1 = np.where(predClass==1)[0]
indC2 = np.where(predClass==2)[0]
plt.subplot(1,2,2)
plt.scatter(class1[:,0],class1[:,1], c='b', alpha=0.3, s=50, edgecolor='none')
plt.scatter(class2[:,0],class2[:,1], c='r', alpha=0.3, s=50, edgecolor='none')
plt.scatter(query[indC1,0],query[indC1,1], c='b', alpha=1, s=50)
plt.scatter(query[indC2,0],query[indC2,1], c='r', alpha=1, s=50)
plt.tight_layout()
plt.savefig('knn.png')
plt.show()
|
agpl-3.0
|
mitdbg/modeldb
|
client/verta/verta/_internal_utils/_utils.py
|
1
|
30525
|
# -*- coding: utf-8 -*-
import datetime
import glob
import inspect
import json
import numbers
import os
import re
import string
import subprocess
import sys
import threading
import time
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value, ListValue, Struct, NULL_VALUE
from ..external import six
from ..external.six.moves.urllib.parse import urljoin # pylint: disable=import-error, no-name-in-module
from .._protos.public.modeldb import CommonService_pb2 as _CommonService
try:
import pandas as pd
except ImportError: # pandas not installed
pd = None
try:
import tensorflow as tf
except ImportError: # TensorFlow not installed
tf = None
try:
import ipykernel
except ImportError: # Jupyter not installed
pass
else:
try:
from IPython.display import Javascript, display
try: # Python 3
from notebook.notebookapp import list_running_servers
except ImportError: # Python 2
import warnings
from IPython.utils.shimmodule import ShimWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ShimWarning)
from IPython.html.notebookapp import list_running_servers
del warnings, ShimWarning # remove ad hoc imports from scope
except ImportError: # abnormally nonstandard installation of Jupyter
pass
try:
import numpy as np
except ImportError: # NumPy not installed
np = None
BOOL_TYPES = (bool,)
else:
BOOL_TYPES = (bool, np.bool_)
_GRPC_PREFIX = "Grpc-Metadata-"
_VALID_HTTP_METHODS = {'GET', 'POST', 'PUT', 'DELETE'}
_VALID_FLAT_KEY_CHARS = set(string.ascii_letters + string.digits + '_-/')
THREAD_LOCALS = threading.local()
THREAD_LOCALS.active_experiment_run = None
SAVED_MODEL_DIR = "/app/tf_saved_model/"
class Connection:
def __init__(self, scheme=None, socket=None, auth=None, max_retries=0, ignore_conn_err=False):
"""
HTTP connection configuration utility struct.
Parameters
----------
scheme : {'http', 'https'}, optional
HTTP authentication scheme.
socket : str, optional
Hostname and port.
auth : dict, optional
Verta authentication headers.
max_retries : int, default 0
Maximum number of times to retry a request on a connection failure. This only attempts retries
on HTTP codes {502, 503, 504} which commonly occur during back end connection lapses.
ignore_conn_err : bool, default False
Whether to ignore connection errors and instead return successes with empty contents.
"""
self.scheme = scheme
self.socket = socket
self.auth = auth
# TODO: retry on 404s, but only if we're sure it's not legitimate e.g. from a GET
self.retry = Retry(total=max_retries,
backoff_factor=1, # each retry waits (2**retry_num) seconds
method_whitelist=False, # retry on all HTTP methods
status_forcelist=(502, 503, 504), # only retry on these status codes
raise_on_redirect=False, # return Response instead of raising after max retries
raise_on_status=False) # return Response instead of raising after max retries
self.ignore_conn_err = ignore_conn_err
class Configuration:
def __init__(self, use_git=True, debug=False):
"""
Client behavior configuration utility struct.
Parameters
----------
use_git : bool, default True
Whether to use a local Git repository for certain operations.
"""
self.use_git = use_git
self.debug = debug
class LazyList(object):
# number of items to fetch per back end call in __iter__()
_ITER_PAGE_LIMIT = 100
def __init__(self, conn, conf, msg, endpoint, rest_method):
self._conn = conn
self._conf = conf
self._msg = msg # protobuf msg used to make back end calls
self._endpoint = endpoint
self._rest_method = rest_method
def __getitem__(self, index):
if isinstance(index, int):
# copy msg to avoid mutating `self`'s state
msg = self._msg.__class__()
msg.CopyFrom(self._msg)
msg.page_limit = 1
if index >= 0:
# convert zero-based indexing into page number
msg.page_number = index + 1
else:
# reverse page order to index from end
msg.ascending = not msg.ascending # pylint: disable=no-member
msg.page_number = abs(index)
response_msg = self._call_back_end(msg)
records = self._get_records(response_msg)
if (not records
and msg.page_number > response_msg.total_records): # pylint: disable=no-member
raise IndexError("index out of range")
id_ = records[0].id
return self._create_element(id_)
else:
raise TypeError("index must be integer, not {}".format(type(index)))
def __iter__(self):
# copy msg to avoid mutating `self`'s state
msg = self._msg.__class__()
msg.CopyFrom(self._msg)
msg.page_limit = self._ITER_PAGE_LIMIT
msg.page_number = 0 # this will be incremented as soon as we enter the loop
seen_ids = set()
total_records = float('inf')
while msg.page_limit*msg.page_number < total_records: # pylint: disable=no-member
msg.page_number += 1 # pylint: disable=no-member
response_msg = self._call_back_end(msg)
total_records = response_msg.total_records
ids = self._get_ids(response_msg)
for id_ in ids:
# skip if we've seen the ID before
if id_ in seen_ids:
continue
else:
seen_ids.add(id_)
yield self._create_element(id_)
def __len__(self):
# copy msg to avoid mutating `self`'s state
msg = self._msg.__class__()
msg.CopyFrom(self._msg)
msg.page_limit = msg.page_number = 1 # minimal request just to get total_records
response_msg = self._call_back_end(msg)
return response_msg.total_records
def _call_back_end(self, msg):
data = proto_to_json(msg)
if self._rest_method == "GET":
response = make_request(
self._rest_method,
self._endpoint.format(self._conn.scheme, self._conn.socket),
self._conn, params=data,
)
elif self._rest_method == "POST":
response = make_request(
self._rest_method,
self._endpoint.format(self._conn.scheme, self._conn.socket),
self._conn, json=data,
)
raise_for_http_error(response)
response_msg = json_to_proto(response.json(), msg.Response)
return response_msg
def _get_ids(self, response_msg):
return (record.id for record in self._get_records(response_msg))
def _get_records(self, response_msg):
"""Get the attribute of `response_msg` that is not `total_records`."""
raise NotImplementedError
def _create_element(self, id_):
"""Instantiate element to return to user."""
raise NotImplementedError
def make_request(method, url, conn, **kwargs):
"""
Makes a REST request.
Parameters
----------
method : {'GET', 'POST', 'PUT', 'DELETE'}
HTTP method.
url : str
URL.
conn : Connection
Connection authentication and configuration.
**kwargs
Parameters to requests.request().
Returns
-------
requests.Response
"""
if method.upper() not in _VALID_HTTP_METHODS:
raise ValueError("`method` must be one of {}".format(_VALID_HTTP_METHODS))
if conn.auth is not None:
# add auth to `kwargs['headers']`
kwargs.setdefault('headers', {}).update(conn.auth)
with requests.Session() as s:
s.mount(url, HTTPAdapter(max_retries=conn.retry))
try:
response = s.request(method, url, **kwargs)
except (requests.exceptions.BaseHTTPError,
requests.exceptions.RequestException) as e:
if not conn.ignore_conn_err:
raise e
else:
if response.ok or not conn.ignore_conn_err:
return response
# fabricate response
response = requests.Response()
response.status_code = 200 # success
response._content = six.ensure_binary("{}") # empty contents
return response
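# Minimal usage sketch (hypothetical host, endpoint, and params; shown only to illustrate the call shape):
#     conn = Connection(scheme="https", socket="example-verta-host:443")
#     response = make_request("GET", "https://example-verta-host:443/api/v1/some/endpoint", conn, params={"id": "123"})
#     raise_for_http_error(response)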
def raise_for_http_error(response):
"""
Raises a potential HTTP error with a back end message if provided, or a default error message otherwise.
Parameters
----------
response : :class:`requests.Response`
Response object returned from a `requests`-module HTTP request.
Raises
------
:class:`requests.HTTPError`
If an HTTP error occurred.
"""
try:
response.raise_for_status()
except requests.HTTPError as e:
try:
reason = response.json()['message']
except (ValueError, # not JSON response
KeyError): # no 'message' from back end
six.raise_from(e, None) # use default reason
else:
# replicate https://github.com/psf/requests/blob/428f7a/requests/models.py#L954
if 400 <= response.status_code < 500:
cause = "Client"
elif 500 <= response.status_code < 600:
cause = "Server"
else: # should be impossible here, but sure okay
cause = "Unexpected"
message = "{} {} Error: {} for url: {}".format(response.status_code, cause, reason, response.url)
six.raise_from(requests.HTTPError(message, response=response), None)
def is_hidden(path): # to avoid "./".startswith('.')
return os.path.basename(path.rstrip('/')).startswith('.') and path != "."
def find_filepaths(paths, extensions=None, include_hidden=False, include_venv=False):
"""
Unravels a list of file and directory paths into a list of only filepaths by walking through the
directories.
Parameters
----------
paths : str or list of str
File and directory paths.
extensions : str or list of str, optional
What files to include while walking through directories. If not provided, all files will be
included.
include_hidden : bool, default False
Whether to include hidden files and subdirectories found while walking through directories.
include_venv : bool, default False
Whether to include Python virtual environment directories.
Returns
-------
filepaths : set
"""
if isinstance(paths, six.string_types):
paths = [paths]
paths = list(map(os.path.expanduser, paths))
if isinstance(extensions, six.string_types):
extensions = [extensions]
if extensions is not None:
# prepend period to file extensions where missing
extensions = map(lambda ext: ext if ext.startswith('.') else ('.' + ext), extensions)
extensions = set(extensions)
filepaths = set()
for path in paths:
if os.path.isdir(path):
for parent_dir, dirnames, filenames in os.walk(path):
if not include_hidden:
# skip hidden directories
dirnames[:] = [dirname for dirname in dirnames if not is_hidden(dirname)]
# skip hidden files
filenames[:] = [filename for filename in filenames if not is_hidden(filename)]
if not include_venv:
exec_path_glob = os.path.join(parent_dir, "{}", "bin", "python*")
dirnames[:] = [dirname for dirname in dirnames if not glob.glob(exec_path_glob.format(dirname))]
for filename in filenames:
if extensions is None or os.path.splitext(filename)[1] in extensions:
filepaths.add(os.path.join(parent_dir, filename))
else:
filepaths.add(path)
return filepaths
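# Example usage (hypothetical paths, for illustration only): with a directory "./src" containing "main.py" and a
# hidden ".cache/" subdirectory, find_filepaths(["./src", "README.md"], extensions="py") would return
# {"./src/main.py", "README.md"}; explicitly listed files are kept regardless of extension.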
def proto_to_json(msg):
"""
Converts a `protobuf` `Message` object into a JSON-compliant dictionary.
The output preserves snake_case field names and integer representations of enum variants.
Parameters
----------
msg : google.protobuf.message.Message
`protobuf` `Message` object.
Returns
-------
dict
JSON object representing `msg`.
"""
return json.loads(json_format.MessageToJson(msg,
including_default_value_fields=True,
preserving_proto_field_name=True,
use_integers_for_enums=True))
def json_to_proto(response_json, response_cls, ignore_unknown_fields=True):
"""
Converts a JSON-compliant dictionary into a `protobuf` `Message` object.
Parameters
----------
response_json : dict
JSON object representing a Protocol Buffer message.
response_cls : type
`protobuf` `Message` subclass, e.g. ``CreateProject.Response``.
ignore_unknown_fields : bool, default True
Whether to allow (and ignore) fields in `response_json` that are not defined in
`response_cls`. This is for forward compatibility with the back end; if the Client protos
are outdated and we get a response with new fields, ``True`` prevents an error.
Returns
-------
google.protobuf.message.Message
`protobuf` `Message` object represented by `response_json`.
"""
return json_format.Parse(json.dumps(response_json),
response_cls(),
ignore_unknown_fields=ignore_unknown_fields)
def to_builtin(obj):
"""
Tries to coerce `obj` into a built-in type, for JSON serialization.
Parameters
----------
obj
Returns
-------
object
A built-in equivalent of `obj`, or `obj` unchanged if it could not be handled by this function.
"""
# jump through ludicrous hoops to avoid having hard dependencies in the Client
cls_ = obj.__class__
obj_class = getattr(cls_, '__name__', None)
obj_module = getattr(cls_, '__module__', None)
# booleans
if isinstance(obj, BOOL_TYPES):
return True if obj else False
# NumPy scalars
if obj_module == "numpy" and obj_class.startswith(('int', 'uint', 'float', 'str')):
return obj.item()
# scientific library collections
if obj_class == "ndarray":
return obj.tolist()
if obj_class == "Series":
return obj.values.tolist()
if obj_class == "DataFrame":
return obj.values.tolist()
if obj_class == "Tensor" and obj_module == "torch":
return obj.detach().numpy().tolist()
if tf is not None and isinstance(obj, tf.Tensor): # if TensorFlow
try:
return obj.numpy().tolist()
except: # TF 1.X or not-eager execution
pass
# strings
if isinstance(obj, six.string_types): # prevent infinite loop with iter
return obj
if isinstance(obj, six.binary_type):
return six.ensure_str(obj)
# dicts and lists
if isinstance(obj, dict):
return {to_builtin(key): to_builtin(val) for key, val in six.viewitems(obj)}
try:
iter(obj)
except TypeError:
pass
else:
return [to_builtin(val) for val in obj]
return obj
def python_to_val_proto(raw_val, allow_collection=False):
"""
Converts a Python variable into a `protobuf` `Value` `Message` object.
Parameters
----------
raw_val
Python variable.
allow_collection : bool, default False
Whether to allow ``list``s and ``dict``s as `val`. This flag exists because some callers
should not support logging collections, so this function performs the typecheck on `val` for them.
Returns
-------
google.protobuf.struct_pb2.Value
`protobuf` `Value` `Message` representing `val`.
"""
# TODO: check `allow_collection` before `to_builtin()` to avoid unnecessary processing
val = to_builtin(raw_val)
if val is None:
return Value(null_value=NULL_VALUE)
elif isinstance(val, bool): # did you know that `bool` is a subclass of `int`?
return Value(bool_value=val)
elif isinstance(val, numbers.Real):
return Value(number_value=val)
elif isinstance(val, six.string_types):
return Value(string_value=val)
elif isinstance(val, (list, dict)):
if allow_collection:
if isinstance(val, list):
list_value = ListValue()
list_value.extend(val) # pylint: disable=no-member
return Value(list_value=list_value)
else: # isinstance(val, dict)
if all([isinstance(key, six.string_types) for key in val.keys()]):
struct_value = Struct()
struct_value.update(val) # pylint: disable=no-member
return Value(struct_value=struct_value)
else: # protobuf's fault
raise TypeError("struct keys must be strings; consider using log_artifact() instead")
else:
raise TypeError("unsupported type {}; consider using log_attribute() instead".format(type(raw_val)))
else:
raise TypeError("unsupported type {}; consider using log_artifact() instead".format(type(raw_val)))
def val_proto_to_python(msg):
"""
Converts a `protobuf` `Value` `Message` object into a Python variable.
Parameters
----------
msg : google.protobuf.struct_pb2.Value
`protobuf` `Value` `Message` representing a variable.
Returns
-------
one of {None, bool, float, int, str}
Python variable represented by `msg`.
"""
value_kind = msg.WhichOneof("kind")
if value_kind == "null_value":
return None
elif value_kind == "bool_value":
return msg.bool_value
elif value_kind == "number_value":
return int(msg.number_value) if msg.number_value.is_integer() else msg.number_value
elif value_kind == "string_value":
return msg.string_value
elif value_kind == "list_value":
return [val_proto_to_python(val_msg)
for val_msg
in msg.list_value.values]
elif value_kind == "struct_value":
return {key: val_proto_to_python(val_msg)
for key, val_msg
in msg.struct_value.fields.items()}
else:
raise NotImplementedError("retrieved value type is not supported")
def unravel_key_values(rpt_key_value_msg):
"""
Converts a repeated KeyValue field of a protobuf message into a dictionary.
Parameters
----------
rpt_key_value_msg : google.protobuf.pyext._message.RepeatedCompositeContainer
Repeated KeyValue field of a protobuf message.
Returns
-------
dict of str to {None, bool, float, int, str}
Names and values.
"""
return {key_value.key: val_proto_to_python(key_value.value)
for key_value
in rpt_key_value_msg}
def unravel_artifacts(rpt_artifact_msg):
"""
Converts a repeated Artifact field of a protobuf message into a list of names.
Parameters
----------
rpt_artifact_msg : google.protobuf.pyext._message.RepeatedCompositeContainer
Repeated Artifact field of a protobuf message.
Returns
-------
list of str
Names of artifacts.
"""
return [artifact.key
for artifact
in rpt_artifact_msg]
def unravel_observation(obs_msg):
"""
Converts an Observation protobuf message into a more straightforward Python tuple.
This is useful because an Observation message has a oneof that's finicky to handle.
Returns
-------
str
Name of observation.
{None, bool, float, int, str}
Value of observation.
str
Human-readable timestamp.
"""
if obs_msg.WhichOneof("oneOf") == "attribute":
key = obs_msg.attribute.key
value = obs_msg.attribute.value
elif obs_msg.WhichOneof("oneOf") == "artifact":
key = obs_msg.artifact.key
value = "{} artifact".format(_CommonService.ArtifactTypeEnum.ArtifactType.Name(obs_msg.artifact.artifact_type))
return (
key,
val_proto_to_python(value),
timestamp_to_str(obs_msg.timestamp),
)
def unravel_observations(rpt_obs_msg):
"""
Converts a repeated Observation field of a protobuf message into a dictionary.
Parameters
----------
rpt_obs_msg : google.protobuf.pyext._message.RepeatedCompositeContainer
Repeated Observation field of a protobuf message.
Returns
-------
dict of str to list of tuples ({None, bool, float, int, str}, str)
Names and observation sequences.
"""
observations = {}
for obs_msg in rpt_obs_msg:
key, value, timestamp = unravel_observation(obs_msg)
observations.setdefault(key, []).append((value, timestamp))
return observations
def validate_flat_key(key):
"""
Checks whether `key` contains invalid characters.
To prevent bugs with querying (which allows dot-delimited nested keys), flat keys (such as those
used for individual metrics) must not contain periods.
Furthermore, to prevent potential bugs with the back end down the line, keys should be restricted
to alphanumeric characters, underscores, and dashes until we can verify robustness.
Parameters
----------
key : str
Name of metadatum.
Raises
------
ValueError
If `key` contains invalid characters.
"""
for c in key:
if c not in _VALID_FLAT_KEY_CHARS:
raise ValueError("`key` may only contain alphanumeric characters, underscores, dashes,"
" and forward slashes")
def generate_default_name():
"""
Generates a string that can be used as a default entity name while avoiding collisions.
The generated string is a concatenation of the current process ID and the current Unix timestamp,
such that a collision should only occur if a single process produces two of an entity at the same
nanosecond.
Returns
-------
name : str
String generated from the current process ID and Unix timestamp.
"""
return "{}{}".format(os.getpid(), str(time.time()).replace('.', ''))
class UTC(datetime.tzinfo):
"""UTC timezone class for Python 2 timestamp calculations"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
def timestamp_to_ms(timestamp):
"""
Converts a Unix timestamp into one with millisecond resolution.
Parameters
----------
timestamp : float or int
Unix timestamp.
Returns
-------
int
`timestamp` with millisecond resolution (13 integer digits).
"""
num_integer_digits = len(str(timestamp).split('.')[0])
return int(timestamp*10**(13 - num_integer_digits))
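# Worked examples of the digit-count scaling above (illustrative):
#     timestamp_to_ms(1600000000)    -> 1600000000000  (10-digit seconds value, scaled up by 10**3)
#     timestamp_to_ms(1600000000000) -> 1600000000000  (13-digit value already in milliseconds, unchanged)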
def ensure_timestamp(timestamp):
"""
Converts a representation of a datetime into a Unix timestamp with millisecond resolution.
If `timestamp` is provided as a string, this function attempts to use pandas (if installed) to
parse it into a Unix timestamp, since pandas can internally handle many different human-readable
datetime string representations. If pandas is not installed, this function will only handle an
ISO 8601 representation.
Parameters
----------
timestamp : str or float or int
String representation of a datetime or numerical Unix timestamp.
Returns
-------
int
`timestamp` with millisecond resolution (13 integer digits).
"""
if isinstance(timestamp, six.string_types):
try: # attempt with pandas, which can parse many time string formats
return timestamp_to_ms(pd.Timestamp(timestamp).timestamp())
except NameError: # pandas not installed
six.raise_from(ValueError("pandas must be installed to parse datetime strings"),
None)
except ValueError: # can't be handled by pandas
six.raise_from(ValueError("unable to parse datetime string \"{}\"".format(timestamp)),
None)
elif isinstance(timestamp, numbers.Real):
return timestamp_to_ms(timestamp)
elif isinstance(timestamp, datetime.datetime):
if six.PY2:
# replicate https://docs.python.org/3/library/datetime.html#datetime.datetime.timestamp
seconds = (timestamp - datetime.datetime(1970, 1, 1, tzinfo=UTC())).total_seconds()
else: # Python 3
seconds = timestamp.timestamp()
return timestamp_to_ms(seconds)
else:
raise TypeError("unable to parse timestamp of type {}".format(type(timestamp)))
def timestamp_to_str(timestamp):
"""
Converts a Unix timestamp into a human-readable string representation.
Parameters
----------
timestamp : int
Numerical Unix timestamp.
Returns
-------
str
Human-readable string representation of `timestamp`.
"""
num_digits = len(str(timestamp))
return str(datetime.datetime.fromtimestamp(timestamp*10**(10 - num_digits)))
def now():
"""
Returns the current Unix timestamp with millisecond resolution.
Returns
-------
now : int
Current Unix timestamp in milliseconds.
"""
return timestamp_to_ms(time.time())
def get_python_version():
"""
Returns the version number of the locally-installed Python interpreter.
Returns
-------
str
Python version number in the form "{major}.{minor}.{patch}".
"""
return '.'.join(map(str, sys.version_info[:3]))
def save_notebook(notebook_path=None, timeout=5):
"""
Saves the current notebook on disk and returns its contents after the file has been rewritten.
Parameters
----------
notebook_path : str, optional
Filepath of the Jupyter Notebook.
timeout : float, default 5
Maximum number of seconds to wait for the notebook to save.
Returns
-------
notebook_contents : file-like
An in-memory copy of the notebook's contents at the time this function returns. This can
be ignored, but is nonetheless available to minimize the risk of a race condition caused by
delaying the read until a later time.
Raises
------
OSError
If the notebook is not saved within `timeout` seconds.
"""
if notebook_path is None:
notebook_path = get_notebook_filepath()
modtime = os.path.getmtime(notebook_path)
display(Javascript('''
require(["base/js/namespace"],function(Jupyter) {
Jupyter.notebook.save_checkpoint();
});
'''))
# wait for file to be modified
start_time = time.time()
while time.time() - start_time < timeout:
new_modtime = os.path.getmtime(notebook_path)
if new_modtime > modtime:
break
time.sleep(0.01)
else:
raise OSError("unable to save notebook")
# wait for file to be rewritten
timeout -= (time.time() - start_time) # remaining time
start_time = time.time()
while time.time() - start_time < timeout:
with open(notebook_path, 'r') as f:
contents = f.read()
if contents:
return six.StringIO(contents)
time.sleep(0.01)
else:
raise OSError("unable to read saved notebook")
def get_notebook_filepath():
"""
Returns the filesystem path of the Jupyter notebook running the Client.
This implementation is from https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246.
Returns
-------
str
Raises
------
OSError
If one of the following is true:
- Jupyter is not installed
- Client is not being called from a notebook
- the calling notebook cannot be identified
"""
try:
connection_file = ipykernel.connect.get_connection_file()
except (NameError, # Jupyter not installed
RuntimeError): # not in a Notebook
pass
else:
kernel_id = re.search('kernel-(.*).json', connection_file).group(1)
for server in list_running_servers():
response = requests.get(urljoin(server['url'], 'api/sessions'),
params={'token': server.get('token', '')})
if response.ok:
for session in response.json():
if session['kernel']['id'] == kernel_id:
relative_path = session['notebook']['path']
return os.path.join(server['notebook_dir'], relative_path)
raise OSError("unable to find notebook file")
def get_script_filepath():
"""
Returns the filesystem path of the Python script running the Client.
This function iterates back through the call stack until it finds a non-Verta stack frame and
returns its filepath.
Returns
-------
str
Raises
------
OSError
If the calling script cannot be identified.
"""
for frame_info in inspect.stack():
module = inspect.getmodule(frame_info[0])
if module is None or module.__name__.split('.', 1)[0] != "verta":
filepath = frame_info[1]
if os.path.exists(filepath): # e.g. Jupyter fakes the filename for cells
return filepath
else:
break # continuing might end up returning a built-in
raise OSError("unable to find script file")
def is_org(workspace_name, conn):
response = make_request(
"GET",
"{}://{}/api/v1/uac-proxy/organization/getOrganizationByName".format(conn.scheme, conn.socket),
conn, params={'org_name': workspace_name},
)
return response.status_code != 404
|
mit
|
bsipocz/statsmodels
|
statsmodels/examples/run_all.py
|
34
|
1984
|
'''run all examples to make sure we don't get an exception
Note:
If an example contains plt.show(), then all plot windows have to be closed
manually, at least in my setup.
uncomment plt.show() to show all plot windows
'''
from __future__ import print_function
from statsmodels.compat.python import lzip, input
import matplotlib.pyplot as plt #matplotlib is required for many examples
stop_on_error = True
filelist = ['example_glsar.py', 'example_wls.py', 'example_gls.py',
'example_glm.py', 'example_ols_tftest.py', #'example_rpy.py',
'example_ols.py', 'example_ols_minimal.py', 'example_rlm.py',
'example_discrete.py', 'example_predict.py',
'example_ols_table.py',
'tut_ols.py', 'tut_ols_rlm.py', 'tut_ols_wls.py']
use_glob = True
if use_glob:
import glob
filelist = glob.glob('*.py')
print(lzip(range(len(filelist)), filelist))
for fname in ['run_all.py', 'example_rpy.py']:
filelist.remove(fname)
#filelist = filelist[15:]
#temporarily disable show
plt_show = plt.show
def noop(*args):
pass
plt.show = noop
cont = input("""Are you sure you want to run all of the examples?
This is done mainly to check that they are up to date.
(y/n) >>> """)
has_errors = []
if 'y' in cont.lower():
for run_all_f in filelist:
try:
print("\n\nExecuting example file", run_all_f)
print("-----------------------" + "-"*len(run_all_f))
exec(open(run_all_f).read())
except:
#f might be overwritten in the executed file
print("**********************" + "*"*len(run_all_f))
print("ERROR in example file", run_all_f)
print("**********************" + "*"*len(run_all_f))
has_errors.append(run_all_f)
if stop_on_error:
raise
print('\nModules that raised exception:')
print(has_errors)
#reenable show after closing windows
plt.close('all')
plt.show = plt_show
plt.show()
|
bsd-3-clause
|
CollasLab/edd
|
setup.py
|
1
|
2787
|
import os, sys
from setuptools import setup
from distutils.extension import Extension
def get_version(m):
xs = m.__version__.split('.')
xs += ['0', '0']
version, major, minor = xs[:3]
return version, major, minor
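# Illustrative example: for a module reporting __version__ == '0.11.2.2', get_version returns ('0', '11', '2').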
if not sys.version_info[:2] == (2, 7):
raise Exception('''\
Edd requires python version 2.7.x, but you are using %d.%d.%d''' %
sys.version_info[:3])
try:
import numpy as np
except ImportError:
raise Exception('''\
EDD has compile time dependencies on numpy. So please install numpy first.
e.g.: pip install --upgrade numpy''')
try:
import pysam
version, major, minor = get_version(pysam)
too_old_pysam = int(version) == 0 and int(major) < 10
too_recent_pysam = int(version) != 0 or int(major) >= 12
if too_old_pysam or too_recent_pysam:
sys.stderr.write('''\
###########
# ERROR #
###########
EDD is only compatible with pysam versions from 0.10.0 up to and including 0.11.2.2.
The detected version was %s.
Aborting ...
''' % pysam.__version__)
sys.exit(1)
except ImportError:
raise Exception('''\
EDD has compile time dependencies on pysam. So please install pysam first.
e.g.: pip install --upgrade pysam''')
try:
from Cython.Distutils import build_ext # Cython should be installed via pysam
#from Cython.Distutils.extension import Extension
except ImportError:
raise Exception('please install cython first, e.g.: pip install --upgrade cython')
setup(name='edd',
version='1.1.19',
description='Enriched domain detector for ChIP-seq data',
url='http://github.com/CollasLab/edd',
author='Eivind G. Lund',
author_email='[email protected]',
packages=['eddlib',
'eddlib.algorithm'],
# installs into root dir (not what i want)
#data_files=[('eddlib', ['eddlib/default_parameters.conf'])],
package_data={'': ['*.conf']},
scripts=[
'bin/edd',
#'bin/edd-tools',
],
install_requires=[
'Logbook',
'pybedtools',
'statsmodels',
'patsy', # statsmodels dependency
'pandas',
'python-dateutil', # pandas dependency
'scipy',
'numpy',
'pysam>=0.10,<0.12',
],
ext_modules=[
Extension('eddlib.read_bam',
sources=['eddlib/read_bam.pyx'],
include_dirs=pysam.get_include() + [np.get_include()],
define_macros=pysam.get_defines(),
),
Extension('eddlib.algorithm.chrom_max_segments',
sources=['eddlib/algorithm/chrom_max_segments.pyx'],
include_dirs=[np.get_include()]),
],
cmdclass = {'build_ext': build_ext},
)
|
mit
|
subodhchhabra/pandashells
|
pandashells/test/p_smooth_tests.py
|
3
|
1577
|
#! /usr/bin/env python
from mock import patch, MagicMock
from unittest import TestCase
import pandas as pd
from pandashells.bin.p_smooth import main, get_input_args, validate_args
class GetInputArgsTests(TestCase):
@patch('pandashells.bin.p_smooth.sys.argv', 'p.smooth -x x -y y'.split())
def test_right_number_of_args(self):
args = get_input_args()
self.assertEqual(len(args.__dict__), 6)
class ValidateArgs(TestCase):
def test_okay(self):
# passing test means nothing raised
args = MagicMock(quiet=False)
cols = ['a']
df = MagicMock(columns=['a'])
validate_args(args, cols, df)
@patch('pandashells.bin.p_smooth.sys.stderr')
def test_bad_cols(self, stderr_mock):
# requesting a column that is not in the dataframe should cause a SystemExit
args = MagicMock(quiet=False)
cols = ['b']
df = MagicMock(columns=['a'])
with self.assertRaises(SystemExit):
validate_args(args, cols, df)
class MainTests(TestCase):
@patch(
'pandashells.bin.p_smooth.sys.argv',
'p.smooth -x x -y y'.split())
@patch('pandashells.bin.p_smooth.io_lib.df_to_output')
@patch('pandashells.bin.p_smooth.io_lib.df_from_input')
def test_cli(self, df_from_input_mock, df_to_output_mock):
df_in = pd.DataFrame({
'x': range(1, 101),
'y': range(1, 101),
})
df_from_input_mock.return_value = df_in
main()
dfout = df_to_output_mock
self.assertEqual(
list(dfout.call_args_list[0][0][1].columns), ['x', 'y'])
|
bsd-2-clause
|
wogsland/QSTK
|
build/lib.linux-x86_64-2.7/Bin/Data_CSV.py
|
5
|
3301
|
#File to read the data from mysql and push into CSV.
# Python imports
import datetime as dt
import csv
import copy
import os
import pickle
# 3rd party imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# QSTK imports
from QSTK.qstkutil import qsdateutil as du
import QSTK.qstkutil.DataEvolved as de
def get_data(ls_symbols, ls_keys):
'''
@summary: Gets a data chunk for backtesting
@param dt_start: Start time
@param dt_end: End time
@param ls_symbols: symbols to use
@note: More data will be pulled from before and after the limits to ensure
valid data on the start/end dates, which requires lookback/forward
@return: data dictionary
'''
print "Getting Data from MySQL"
# Modify dates to ensure enough data for all features
dt_start = dt.datetime(2005,1,1)
dt_end = dt.datetime(2012, 8, 31)
ldt_timestamps = du.getNYSEdays( dt_start, dt_end, dt.timedelta(hours=16) )
c_da = de.DataAccess('mysql')
ldf_data = c_da.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
return d_data
def read_symbols(s_symbols_file):
ls_symbols=[]
file = open(s_symbols_file, 'r')
for f in file.readlines():
j = f[:-1]
ls_symbols.append(j)
file.close()
return ls_symbols
def csv_sym(sym, d_data, ls_keys, s_directory):
bool_first_iter = True
for key in ls_keys:
if bool_first_iter == True:
df_sym = d_data[key].reindex(columns = [sym])
df_sym = df_sym.rename(columns = {sym : key})
bool_first_iter = False
else:
df_temp = d_data[key].reindex(columns = [sym])
df_temp = df_temp.rename(columns = {sym : key})
df_sym = df_sym.join(df_temp, how= 'outer')
symfilename = sym.split('-')[0]
sym_file = open(s_directory + symfilename + '.csv', 'w')
sym_file.write("Date,Open,High,Low,Close,Volume,Adj Close \n")
ldt_timestamps = list(df_sym.index)
ldt_timestamps.reverse()
for date in ldt_timestamps:
date_to_csv = '{:%Y-%m-%d}'.format(date)
string_to_csv = date_to_csv
for key in ls_keys:
string_to_csv = string_to_csv + ',' + str(df_sym[key][date])
string_to_csv = string_to_csv + '\n'
sym_file.write(string_to_csv)
def main(s_directory, s_symbols_file):
#ls_symbols = read_symbols(s_symbols_file)
ls_symbols = ['ACS-201002','BDK-201003','BJS-201004','BSC-201108','CCT-201111','EQ-200907','JAVA-201002','NCC-200901','NOVL-201104','PBG-201003','PTV-201011','ROH-200904','SGP-200911','SII-201008','WB-200901','WYE-200910','XTO-201006']
ls_keys = ['actual_open', 'actual_high', 'actual_low', 'actual_close', 'volume', 'close']
d_data = get_data(ls_symbols, ls_keys)
# print d_data
print "Creating CSV files now"
for sym in ls_symbols:
print sym
csv_sym(sym,d_data, ls_keys, s_directory)
print "Created all CSV files"
if __name__ == '__main__' :
s_directory = 'MLTData/'
s_directory = os.environ['QSDATA'] + '/Yahoo/'
s_symbols_file1 = 'MLTData/sp5002012.txt'
s_symbols_file2 = 'MLTData/index.txt'
s_symbols_file3 = 'MLTData/sp5002008.txt'
main(s_directory, s_symbols_file3)
|
bsd-3-clause
|
F-Tag/python-vad
|
test/vad_test.py
|
1
|
1276
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import product
from librosa import load
from pyvad import vad
fs_vads = (8000, 16000, 32000, 48000)
hops = (10, 20, 30)
vad_modes = (0, 1, 2, 3)
fss = [16000, 22050]
name = "voice/arctic_a0007.wav"
for fs in fss:
data, fs_r = load(name, sr=fs)
for fs_vad, hop, vad_mode in product(fs_vads, hops, vad_modes):
# print(fs, fs_vad, hop, vad_mode)
vact = vad(data, fs_r, fs_vad=fs_vad,
hop_length=hop, vad_mode=vad_mode)
assert vact.sum() > data.size//2, vact.sum()
"""
import matplotlib.pyplot as plt
plt.plot(data)
plt.plot(vact)
plt.savefig(("voice_"+str(fs_r)+str(fs_vad)+str(hop)+str(vad_mode)+".png"))
plt.close()
"""
"""
data = (np.random.rand(fs*3)-0.5)*0.1
for fs_vad, hop, vad_mode in product(fs_vads, hops, vad_modes):
print(fs, fs_vad, hop, vad_mode)
vact = vad(data, fs, fs_vad=fs_vad, hop_length=hop, vad_mode=vad_mode)
# assert not vact.any(), vact.sum()
import matplotlib.pyplot as plt
plt.plot(data)
plt.plot(vact)
plt.savefig(("noise_"+str(fs)+str(fs_vad)+str(hop)+str(vad_mode)+".png"))
plt.close()
"""
|
mit
|
jmmease/pandas
|
pandas/tests/frame/test_asof.py
|
11
|
3881
|
# coding=utf-8
import numpy as np
from pandas import (DataFrame, date_range, Timestamp, Series,
to_datetime)
import pandas.util.testing as tm
from .common import TestData
class TestFrameAsof(TestData):
def setup_method(self, method):
self.N = N = 50
self.rng = date_range('1/1/1990', periods=N, freq='53s')
self.df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=self.rng)
def test_basic(self):
df = self.df.copy()
df.loc[15:30, 'A'] = np.nan
dates = date_range('1/1/1990', periods=self.N * 3,
freq='25s')
result = df.asof(dates)
assert result.notna().all(1).all()
lb = df.index[14]
ub = df.index[30]
dates = list(dates)
result = df.asof(dates)
assert result.notna().all(1).all()
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == 14).all(1).all()
def test_subset(self):
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=rng)
df.loc[4:8, 'A'] = np.nan
dates = date_range('1/1/1990', periods=N * 3,
freq='25s')
# with a subset of A should be the same
result = df.asof(dates, subset='A')
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# same with A/B
result = df.asof(dates, subset=['A', 'B'])
expected = df.asof(dates)
tm.assert_frame_equal(result, expected)
# B gives self.df.asof
result = df.asof(dates, subset='B')
expected = df.resample('25s', closed='right').ffill().reindex(dates)
expected.iloc[20:] = 9
tm.assert_frame_equal(result, expected)
def test_missing(self):
# GH 15118
# no match found - `where` value before earliest date in index
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
df = DataFrame({'A': np.arange(N), 'B': np.arange(N)},
index=rng)
result = df.asof('1989-12-31')
expected = Series(index=['A', 'B'], name=Timestamp('1989-12-31'))
tm.assert_series_equal(result, expected)
result = df.asof(to_datetime(['1989-12-31']))
expected = DataFrame(index=to_datetime(['1989-12-31']),
columns=['A', 'B'], dtype='float64')
tm.assert_frame_equal(result, expected)
def test_all_nans(self):
# GH 15713
# DataFrame is all nans
result = DataFrame([np.nan]).asof([0])
expected = DataFrame([np.nan])
tm.assert_frame_equal(result, expected)
# testing non-default indexes, multiple inputs
dates = date_range('1/1/1990', periods=self.N * 3, freq='25s')
result = DataFrame(np.nan, index=self.rng, columns=['A']).asof(dates)
expected = DataFrame(np.nan, index=dates, columns=['A'])
tm.assert_frame_equal(result, expected)
# testing multiple columns
dates = date_range('1/1/1990', periods=self.N * 3, freq='25s')
result = DataFrame(np.nan, index=self.rng,
columns=['A', 'B', 'C']).asof(dates)
expected = DataFrame(np.nan, index=dates, columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
# testing scalar input
result = DataFrame(np.nan, index=[1, 2], columns=['A', 'B']).asof([3])
expected = DataFrame(np.nan, index=[3], columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
result = DataFrame(np.nan, index=[1, 2], columns=['A', 'B']).asof(3)
expected = Series(np.nan, index=['A', 'B'], name=3)
tm.assert_series_equal(result, expected)
|
bsd-3-clause
|
carlthome/librosa
|
docs/examples/plot_segmentation.py
|
1
|
7702
|
# -*- coding: utf-8 -*-
"""
======================
Laplacian segmentation
======================
This notebook implements the laplacian segmentation method of
`McFee and Ellis, 2014 <http://bmcfee.github.io/papers/ismir2014_spectral.pdf>`_,
with a couple of minor stability improvements.
Throughout the example, we will refer to equations in the paper by number, so it will be
helpful to read along.
"""
# Code source: Brian McFee
# License: ISC
###################################
# Imports
# - numpy for basic functionality
# - scipy for graph Laplacian
# - matplotlib for visualization
# - sklearn.cluster for K-Means
#
from __future__ import print_function
import numpy as np
import scipy
import matplotlib.pyplot as plt
import sklearn.cluster
import librosa
import librosa.display
#############################
# First, we'll load in a song
y, sr = librosa.load('audio/Karissa_Hobbs_-_09_-_Lets_Go_Fishin.mp3')
##############################################
# Next, we'll compute and plot a log-power CQT
BINS_PER_OCTAVE = 12 * 3
N_OCTAVES = 7
C = librosa.amplitude_to_db(np.abs(librosa.cqt(y=y, sr=sr,
bins_per_octave=BINS_PER_OCTAVE,
n_bins=N_OCTAVES * BINS_PER_OCTAVE)),
ref=np.max)
plt.figure(figsize=(12, 4))
librosa.display.specshow(C, y_axis='cqt_hz', sr=sr,
bins_per_octave=BINS_PER_OCTAVE,
x_axis='time')
plt.tight_layout()
##########################################################
# To reduce dimensionality, we'll beat-synchronize the CQT
tempo, beats = librosa.beat.beat_track(y=y, sr=sr, trim=False)
Csync = librosa.util.sync(C, beats, aggregate=np.median)
# For plotting purposes, we'll need the timing of the beats
# we fix_frames to include non-beat frames 0 and C.shape[1] (final frame)
beat_times = librosa.frames_to_time(librosa.util.fix_frames(beats,
x_min=0,
x_max=C.shape[1]),
sr=sr)
plt.figure(figsize=(12, 4))
librosa.display.specshow(Csync, bins_per_octave=12*3,
y_axis='cqt_hz', x_axis='time',
x_coords=beat_times)
plt.tight_layout()
#####################################################################
# Let's build a weighted recurrence matrix using beat-synchronous CQT
# (Equation 1)
# width=3 prevents links within the same bar
# mode='affinity' here implements S_rep (after Eq. 8)
R = librosa.segment.recurrence_matrix(Csync, width=3, mode='affinity',
sym=True)
# Enhance diagonals with a median filter (Equation 2)
df = librosa.segment.timelag_filter(scipy.ndimage.median_filter)
Rf = df(R, size=(1, 7))
###################################################################
# Now let's build the sequence matrix (S_loc) using mfcc-similarity
#
# :math:`R_\text{path}[i, i\pm 1] = \exp(-\|C_i - C_{i\pm 1}\|^2 / \sigma^2)`
#
# Here, we take :math:`\sigma` to be the median distance between successive beats.
#
mfcc = librosa.feature.mfcc(y=y, sr=sr)
Msync = librosa.util.sync(mfcc, beats)
path_distance = np.sum(np.diff(Msync, axis=1)**2, axis=0)
sigma = np.median(path_distance)
path_sim = np.exp(-path_distance / sigma)
R_path = np.diag(path_sim, k=1) + np.diag(path_sim, k=-1)
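# Note that R_path is square with nonzero weights only on the first super- and sub-diagonals,
# i.e. each beat-synchronous frame is linked only to its immediate neighbors.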
##########################################################
# And compute the balanced combination (Equations 6, 7, 9)
deg_path = np.sum(R_path, axis=1)
deg_rec = np.sum(Rf, axis=1)
mu = deg_path.dot(deg_path + deg_rec) / np.sum((deg_path + deg_rec)**2)
A = mu * Rf + (1 - mu) * R_path
###########################################################
# Plot the resulting graphs (Figure 1, left and center)
plt.figure(figsize=(8, 4))
plt.subplot(1, 3, 1)
librosa.display.specshow(Rf, cmap='inferno_r', y_axis='time',
y_coords=beat_times)
plt.title('Recurrence similarity')
plt.subplot(1, 3, 2)
librosa.display.specshow(R_path, cmap='inferno_r')
plt.title('Path similarity')
plt.subplot(1, 3, 3)
librosa.display.specshow(A, cmap='inferno_r')
plt.title('Combined graph')
plt.tight_layout()
#####################################################
# Now let's compute the normalized Laplacian (Eq. 10)
L = scipy.sparse.csgraph.laplacian(A, normed=True)
# and its spectral decomposition
evals, evecs = scipy.linalg.eigh(L)
# We can clean this up further with a median filter.
# This can help smooth over small discontinuities
evecs = scipy.ndimage.median_filter(evecs, size=(9, 1))
# cumulative normalization is needed for symmetric normalized Laplacian eigenvectors
Cnorm = np.cumsum(evecs**2, axis=1)**0.5
# If we want k clusters, use the first k normalized eigenvectors.
# Fun exercise: see how the segmentation changes as you vary k
k = 5
X = evecs[:, :k] / Cnorm[:, k-1:k]
# Plot the resulting representation (Figure 1, center and right)
plt.figure(figsize=(8, 4))
plt.subplot(1, 2, 2)
librosa.display.specshow(Rf, cmap='inferno_r')
plt.title('Recurrence matrix')
plt.subplot(1, 2, 1)
librosa.display.specshow(X,
y_axis='time',
y_coords=beat_times)
plt.title('Structure components')
plt.tight_layout()
#############################################################
# Let's use these k components to cluster beats into segments
# (Algorithm 1)
KM = sklearn.cluster.KMeans(n_clusters=k)
seg_ids = KM.fit_predict(X)
# and plot the results
plt.figure(figsize=(12, 4))
colors = plt.get_cmap('Paired', k)
plt.subplot(1, 3, 2)
librosa.display.specshow(Rf, cmap='inferno_r')
plt.title('Recurrence matrix')
plt.subplot(1, 3, 1)
librosa.display.specshow(X,
y_axis='time',
y_coords=beat_times)
plt.title('Structure components')
plt.subplot(1, 3, 3)
librosa.display.specshow(np.atleast_2d(seg_ids).T, cmap=colors)
plt.title('Estimated segments')
plt.colorbar(ticks=range(k))
plt.tight_layout()
###############################################################
# Locate segment boundaries from the label sequence
bound_beats = 1 + np.flatnonzero(seg_ids[:-1] != seg_ids[1:])
# Count beat 0 as a boundary
bound_beats = librosa.util.fix_frames(bound_beats, x_min=0)
# Compute the segment label for each boundary
bound_segs = list(seg_ids[bound_beats])
# Convert beat indices to frames
bound_frames = beats[bound_beats]
# Make sure we cover to the end of the track
bound_frames = librosa.util.fix_frames(bound_frames,
x_min=None,
x_max=C.shape[1]-1)
###################################################
# And plot the final segmentation over original CQT
# sphinx_gallery_thumbnail_number = 5
import matplotlib.patches as patches
plt.figure(figsize=(12, 4))
bound_times = librosa.frames_to_time(bound_frames)
freqs = librosa.cqt_frequencies(n_bins=C.shape[0],
fmin=librosa.note_to_hz('C1'),
bins_per_octave=BINS_PER_OCTAVE)
librosa.display.specshow(C, y_axis='cqt_hz', sr=sr,
bins_per_octave=BINS_PER_OCTAVE,
x_axis='time')
ax = plt.gca()
for interval, label in zip(zip(bound_times, bound_times[1:]), bound_segs):
ax.add_patch(patches.Rectangle((interval[0], freqs[0]),
interval[1] - interval[0],
freqs[-1],
facecolor=colors(label),
alpha=0.50))
plt.tight_layout()
plt.show()
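# As a quick check, the detected segments can also be listed as time intervals,
# re-using the boundary times and labels computed above:
for (start, end), label in zip(zip(bound_times, bound_times[1:]), bound_segs):
    print('{:6.2f}s - {:6.2f}s -> segment {:d}'.format(start, end, int(label)))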
|
isc
|
Eng-Mo/CarND-Advanced-Lane-Lines
|
LaneDetect.py
|
1
|
12666
|
# coding: utf-8
# In[1]:
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from IPython.display import HTML
import os, sys
import glob
import moviepy
from moviepy.editor import VideoFileClip
from moviepy.editor import *
from IPython import display
from IPython.core.display import display
from IPython.display import Image
import pylab
import scipy.misc
# In[2]:
def region_of_interest(img):
mask = np.zeros(img.shape, dtype=np.uint8) #mask image
roi_corners = np.array([[(200,675), (1200,675), (700,430),(500,430)]],
                           dtype=np.int32) # vertices set to form a trapezoidal region of interest
channel_count = 1#img.shape[2] # image channels
ignore_mask_color = (255,)*channel_count
cv2.fillPoly(mask, roi_corners, ignore_mask_color)
masked_image = cv2.bitwise_and(img, mask)
return masked_image
# In[3]:
def ColorThreshold(img): # Threshold yellow and white colors from the RGB, HSV, and HLS color spaces
HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
# For yellow
yellow = cv2.inRange(HSV, (20, 100, 100), (50, 255, 255))
# For white
sensitivity_1 = 68
white = cv2.inRange(HSV, (0,0,255-sensitivity_1), (255,20,255))
sensitivity_2 = 60
HSL = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
white_2 = cv2.inRange(HSL, (0,255-sensitivity_2,0), (255,255,sensitivity_2))
white_3 = cv2.inRange(img, (200,200,200), (255,255,255))
bit_layer = yellow | white | white_2 | white_3
return bit_layer
# In[4]:
from skimage import morphology
def SobelThr(img): # Sobel edge detection extraction
gray=img
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0,ksize=15)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1,ksize=15)
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
scaled_sobelx = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
scaled_sobely = np.uint8(255*abs_sobely/np.max(abs_sobely))
binary_outputabsx = np.zeros_like(scaled_sobelx)
binary_outputabsx[(scaled_sobelx >= 70) & (scaled_sobelx <= 255)] = 1
binary_outputabsy = np.zeros_like(scaled_sobely)
binary_outputabsy[(scaled_sobely >= 100) & (scaled_sobely <= 150)] = 1
mag_thresh=(100, 200)
gradmag = np.sqrt(sobelx**2 + sobely**2)
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
binary_outputmag = np.zeros_like(gradmag)
binary_outputmag[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
combinedS = np.zeros_like(binary_outputabsx)
combinedS[(((binary_outputabsx == 1) | (binary_outputabsy == 1))|(binary_outputmag==1)) ] = 1
return combinedS
# In[5]:
def combinI(b1,b2): ##Combine color threshold + Sobel edge detection
combined = np.zeros_like(b1)
combined[((b1 == 1)|(b2 == 255)) ] = 1
return combined
# In[6]:
def prespectI(img): # Calculate the perspective transform and warp the image to a bird's-eye view
src=np.float32([[728,475],
[1058,690],
[242,690],
[565,475]])
dst=np.float32([[1058,20],
[1058,700],
[242,700],
[242,20]])
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, M, (1280,720), flags=cv2.INTER_LINEAR)
return (warped, M)
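# Note: `unwrappedframe` further below expects the inverse mapping Minv; assuming the
# same src/dst points as above, it can be obtained with
#     Minv = cv2.getPerspectiveTransform(dst, src)
# or, equivalently for a 3x3 homography, with np.linalg.inv(M).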
# In[7]:
def undistorT(imgorg): # Compute camera calibration and distortion coefficients from the chessboard images
nx =9
ny = 6
objpoints = []
imgpoints = []
objp=np.zeros((6*9,3),np.float32)
objp[:,:2]=np.mgrid[0:6,0:9].T.reshape(-1,2)
images=glob.glob('./camera_cal/calibration*.jpg')
for fname in images: # find corner points and Make a list of calibration images
img = cv2.imread(fname)
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (6,9),None)
# If found, draw corners
if ret == True:
imgpoints.append(corners)
objpoints.append(objp)
# Draw and display the corners
#cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
return cv2.calibrateCamera(objpoints,imgpoints,gray.shape[::-1],None,None)
# In[8]:
def undistresult(img, mtx,dist): # undistort frame
undist= cv2.undistort(img, mtx, dist, None, mtx)
return undist
# In[9]:
def LineFitting(wimgun): #Fit Lane Lines
# Set minimum number of pixels found to recenter window
minpix = 20
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
histogram = np.sum(wimgun[350:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((wimgun, wimgun, wimgun))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
nwindows = 9
# Set height of windows
window_height = np.int(wimgun.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = wimgun.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin =80
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = wimgun.shape[0] - (window+1)*window_height
win_y_high = wimgun.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, wimgun.shape[0]-1, wimgun.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Create an image to draw on and an image to show the selection window
# out_img = np.dstack((wimgun, wimgun, wimgun))*255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# plt.plot(left_fitx, ploty, color='yellow')
# plt.plot(right_fitx, ploty, color='yellow')
# plt.xlim(0, 1280)
# plt.ylim(720, 0)
# plt.imshow(out_img)
# # plt.savefig("./output_images/Window Image"+str(n)+".png")
# plt.show()
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
# plt.title("r")
# plt.plot(left_fitx, ploty, color='yellow')
# plt.plot(right_fitx, ploty, color='yellow')
# plt.xlim(0, 1280)
# plt.ylim(720, 0)
# plt.imshow(result)
# # plt.savefig("./output_images/Line Image"+str(n)+".png")
# plt.show()
# Define y-value where we want radius of curvature
# I'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
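    # Radius of curvature for a second-order fit x = A*y**2 + B*y + C:
    #     R(y) = (1 + (2*A*y + B)**2)**1.5 / |2*A|
    # evaluated at y_eval (the bottom of the image), first in pixel units and then,
    # further below, re-fitted in metres via ym_per_pix / xm_per_pix.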
left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
#print(left_curverad, right_curverad)
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
# y_eval = np.max(ploty)
# # Calculate the new radias of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
# # left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
# # right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
    camera_center=wimgun.shape[1]/2 # horizontal image centre (x-axis); the warped image is 1280 px wide
# #lane_center = (right_fitx[719] + left_fitx[719])/2
car_position = (camera_center- (left_fitx[-1]+right_fitx[-1])/2)*xm_per_pix
# print(left_curverad1, right_curverad1, lane_offset)
return (left_fit, ploty,right_fit,left_curverad, right_curverad,car_position)
# Create an image to draw the lines on
def unwrappedframe(img,pm, Minv, left_fit,ploty,right_fit):
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
nonzero = img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
warp_zero = np.zeros_like(pm).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
# Combine the result with the original image
return cv2.addWeighted(img, 1, newwarp, 0.3, 0)
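# A minimal driver sketch showing one way the helpers above could be chained on a
# single RGB frame; this wiring is an assumption rather than part of the original
# pipeline, and `mtx`/`dist` are expected to come from a one-off undistorT() call.
def process_frame(frame, mtx, dist):
    undist = undistresult(frame, mtx, dist) # undistort the raw frame
    color_bin = ColorThreshold(undist) # 0/255 colour mask
    sobel_bin = SobelThr(cv2.cvtColor(undist, cv2.COLOR_RGB2GRAY)) # 0/1 edge mask
    combined = region_of_interest(combinI(sobel_bin, color_bin)) # fused binary, ROI only
    warped, M = prespectI(combined) # bird's-eye view binary
    Minv = np.linalg.inv(M) # inverse homography for unwarping
    left_fit, ploty, right_fit, lcur, rcur, offset = LineFitting(warped) # fits + curvature/offset
    return unwrappedframe(undist, warped, Minv, left_fit, ploty, right_fit)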
|
mit
|
mxjl620/scikit-learn
|
sklearn/__init__.py
|
59
|
3038
|
"""
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
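# Usage note: the seed consumed above can be pinned from the environment for
# reproducible test runs, e.g. `export SKLEARN_SEED=42` before invoking the test
# runner (the exact runner command is left to the reader).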
|
bsd-3-clause
|
lakehanne/ensenso
|
ensenso_detect/manikin/load.py
|
1
|
6848
|
#!/usr/bin/env python
import os
import torch
from PIL import Image
from os import listdir
from torch.autograd import Variable
from PIL import ImageFont, ImageDraw
import json
import argparse
import torch.utils.data as data
import torchvision.models as models
import torchvision.transforms as transforms
import numpy as np
from random import shuffle
import torch.nn as nn
import torch.nn.functional as F  # needed for F.relu in Net.forward below
from torchvision import models
import cv2
import sys
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
color_scheme='Linux', call_pdb=1)
from model import ResNet, ResidualBlock
from matplotlib import pyplot as plt
torch.set_default_tensor_type('torch.DoubleTensor')
#class to get values from multiple layers with one forward pass
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()  # initialize the nn.Module base class
        self.conv1 = nn.Conv2d(1, 1, 3)
self.conv2 = nn.Conv2d(1, 1, 3)
self.conv3 = nn.Conv2d(1, 1, 3)
def forward(self, x):
out1 = F.relu(self.conv1(x))
out2 = F.relu(self.conv2(out1))
out3 = F.relu(self.conv3(out2))
return out1, out2, out3
class loadAndParse():
def __init__(self, args, true_path="raw/face_pos/", fake_path="raw/face_pos/"):
self.args = args
self.normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
self.preprocess = transforms.Compose([
transforms.Scale(40),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor()
# self.normalize
])
#provide path to true images
self.true_path= true_path
self.fake_path= fake_path
#define tensors to hold the images in memory
self.real_images, self.real_labels = [], []
self.fake_images, self.fake_labels = [], []
# #load labels file
def loadLabelsFromJson(self):
labels_file = open('labels.json').read()
labels = json.loads(labels_file)
classes = labels # 0 = fake, 1=real
return classes
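    # Note: the prediction code below indexes `classes` with string keys
    # (classes[str(index)]), so labels.json is presumably shaped like
    # {"0": "fake", "1": "real"}; that exact layout is an assumption.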
def loadImages(self, path):
# return array of images
imagesList = listdir(path)
loadedImages = []
for image in imagesList:
img = Image.open(path + image)
loadedImages.append(img)
return loadedImages
# get images in the dir
def getImages(self):
#load images
true_images = self.loadImages(self.true_path)
fake_images = self.loadImages(self.fake_path)
#define labels
self.real_labels = [1]*len(true_images) #faces
self.fake_labels = [0]*len(fake_images)
classes = self.loadLabelsFromJson()
        #make sure the images were loaded correctly
if self.args.disp:
true_images[0].show()
fake_images[0].show()
# Now preprocess and create list for images
for imgs in true_images:
# cast to double since preprocess sends to FloatTensor by default
images_temp = self.preprocess(imgs).double()
if images_temp.size(0) == 3:
self.real_images.append(images_temp)
for imgs in fake_images:
# cast to double since preprocess sends to FloatTensor by default
images_temp = self.preprocess(imgs).double()
if images_temp.size(0) == 3:
self.fake_images.append(images_temp)
if self.args.disp:
print(self.real_images[3])
print(self.fake_images[2])
if self.args.verbose:
# #be sure the images are properly loaded in memory
print("\nTotal # of AllTensors: {}, images size: {}".format(len(self.real_images),
self.real_images[64].size()))
def getImagesAsTensors(self):
self.getImages()
Xtr_len = len(self.real_images)
Xfk_len = len(self.fake_images)
Xtr_tensors = torch.LongTensor(Xtr_len, self.real_images[0].size(0), self.real_images[0].size(1),
self.real_images[0].size(2))
Xfk_tensors = torch.LongTensor(Xfk_len, self.fake_images[0].size(0), self.fake_images[0].size(1),
self.fake_images[0].size(2))
Xtr_tensors = torch.stack(self.real_images[:], 0)
Ytr_tensors = torch.from_numpy(np.array(self.real_labels[:]))
Xfk_tensors = torch.stack(self.fake_images[:], 0)
Yte_tensors = torch.from_numpy(np.array(self.fake_labels[:]))
tr_dataset = data.TensorDataset(Xtr_tensors, Ytr_tensors)
tr_loader = data.DataLoader(tr_dataset, batch_size=self.args.batchSize, shuffle=True)
return tr_loader, Xfk_tensors
def main():
parser = argparse.ArgumentParser(description='Process environmental variables')
parser.add_argument('--feature', dest='feature', action='store_true')
parser.add_argument('--no-feature', dest='feature', action='store_false')
parser.set_defaults(feature=True)
parser.add_argument('--verbose', type=bool, default=False)
parser.add_argument('--epoch', type=int, default=500)
parser.add_argument('--disp', type=bool, default=False)
parser.add_argument('--cuda', type=bool, default=True)
parser.add_argument('--pkl_model', type=int, default=1)
parser.add_argument('--fake_test', type=int, default=0)
parser.add_argument('--batchSize', type=int, default=1)
parser.add_argument('--model', type=str, default='resnet_acc=97_iter=1000.pkl')
args = parser.parse_args()
lnp = loadAndParse(args)
classes = lnp.loadLabelsFromJson()
tr_loader, test_X = lnp.getImagesAsTensors()
base, ext = os.path.splitext(args.model)
model = models.resnet18(pretrained=False)
'''
if (ext == ".pkl"): #using high score model
model = ResNet(ResidualBlock, [3, 3, 3]).cuda()
model.load_state_dict(torch.load('models225/' + args.model))
# print(model.load_state_dict(torch.load('models225/' + args.model)))
else:
model = torch.load('models225/' + args.model)
model.eval()
'''
if not args.cuda:
model.cpu()
#get last layer from resnet
last_layer = nn.Sequential(*list(model.children())[:-1])
model.classifier = last_layer
print(last_layer)
'''
remove last fully connected layer
this will contain the features extracted by the convnet
'''
# eye_classifier = nn.Sequential(*list(model.classifier.children())[:-1])
# model.classifier = eye_classifier
print('using model: ', args.model)
corrIdx, Idx = 0, 0
if (args.fake_test==1):
for i in range(test_X.size(0)):
output = model(Variable(test_X.cuda()))
_, predicted = torch.max(output, 1)
#collect classes
classified = predicted.data[0][0]
index = int(classified)
if index == 0: #fake
corrIdx += 1
Idx += 1
img_class = classes[str(index)]
#display image and class
print('class \'o\' image', classes[str(index)])
print('\n\ncorrectly classified: %d %%' %(100* corrIdx / Idx) )
else:
for images, labels in tr_loader:
output = model(Variable(images.cuda()))
_, predicted = torch.max(output, 1)
#collect classes
classified = predicted.data[0][0]
index = int(classified)
if index == 1: #real
corrIdx += 1
Idx += 1
img_class = classes[str(index)]
#display image and class
# print('class of image', classes[str(index)])
print('\n\ncorrectly classified: %d %%' %(100* corrIdx / Idx) )
if __name__ == '__main__':
main()
|
mit
|
mulhod/reviewer_experience_prediction
|
util/cv_learn.py
|
1
|
61443
|
"""
:author: Matt Mulholland ([email protected])
:date: 10/14/2015
Command-line utility utilizing the RunCVExperiments class, which enables
one to run cross-validation experiments incrementally with a number of
different machine learning algorithms and parameter customizations, etc.
"""
import logging
from copy import copy
from json import dump
from os import makedirs
from itertools import chain
from os.path import (join,
isdir,
isfile,
dirname,
realpath)
from warnings import filterwarnings
import numpy as np
import scipy as sp
import pandas as pd
from cytoolz import take
from typing import (Any,
Dict,
List,
Union,
Optional,
Iterable)
from pymongo import ASCENDING
from sklearn.externals import joblib
from sklearn.metrics import make_scorer
from schema import (Or,
And,
Schema,
SchemaError,
Optional as Default)
from pymongo.collection import Collection
from sklearn.cluster import MiniBatchKMeans
from pymongo.errors import ConnectionFailure
from sklearn.grid_search import GridSearchCV
from sklearn.naive_bayes import (BernoulliNB,
MultinomialNB)
from skll.metrics import (kappa,
pearson,
spearman,
kendall_tau,
f1_score_least_frequent)
from sklearn.feature_selection import (chi2,
SelectPercentile)
from argparse import (ArgumentParser,
ArgumentDefaultsHelpFormatter)
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_extraction import (FeatureHasher,
DictVectorizer)
from sklearn.linear_model import (Perceptron,
PassiveAggressiveRegressor)
from src.mongodb import connect_to_db
from src import (LABELS,
Scorer,
Learner,
Numeric,
BinRanges,
ParamGrid,
formatter,
Vectorizer,
VALID_GAMES,
LEARNER_DICT,
LABELS_STRING,
experiments as ex,
LEARNER_DICT_KEYS,
parse_games_string,
LEARNER_ABBRS_DICT,
OBJ_FUNC_ABBRS_DICT,
LEARNER_ABBRS_STRING,
OBJ_FUNC_ABBRS_STRING,
parse_learners_string,
find_default_param_grid,
parse_non_nlp_features_string)
from src.datasets import (validate_bin_ranges,
get_bin_ranges_helper)
# Filter out warnings since there will be a lot of
# "UndefinedMetricWarning" warnings when running `RunCVExperiments`
filterwarnings("ignore")
# Set up logger
logger = logging.getLogger('util.cv_learn')
logging_debug = logging.DEBUG
logger.setLevel(logging_debug)
loginfo = logger.info
logerr = logger.error
logdebug = logger.debug
sh = logging.StreamHandler()
sh.setLevel(logging_debug)
sh.setFormatter(formatter)
logger.addHandler(sh)
class CVConfig(object):
"""
Class for representing a set of configuration options for use with
the `RunCVExperiments` class.
"""
# Default value to use for the `hashed_features` parameter if 0 is
# passed in.
_n_features_feature_hashing = 2**18
def __init__(self,
db: Collection,
games: set,
learners: List[str],
param_grids: List[ParamGrid],
training_rounds: int,
training_samples_per_round: int,
grid_search_samples_per_fold: int,
non_nlp_features: set,
prediction_label: str,
output_path: str,
objective: str = None,
data_sampling: str = 'even',
grid_search_folds: int = 5,
hashed_features: Optional[int] = None,
nlp_features: bool = True,
bin_ranges: Optional[BinRanges] = None,
lognormal: bool = False,
power_transform: Optional[float] = None,
majority_baseline: bool = True,
rescale: bool = True,
feature_selection_percentile: float = 1.0,
n_jobs: int = 1) -> 'CVConfig':
"""
Initialize object.
:param db: MongoDB database collection object
:type db: Collection
:param games: set of games to use for training models
:type games: set
:param learners: list of abbreviated names corresponding to
the available learning algorithms (see
`src.LEARNER_ABBRS_DICT`, etc.)
:type learners: list
:param param_grids: list of lists of dictionaries of parameters
mapped to lists of values (must be aligned
with list of learners)
:type param_grids: list
:param training_rounds: number of training rounds to do (in
addition to the grid search round)
:type training_rounds: int
:param training_samples_per_round: number of training samples
to use in each training round
:type training_samples_per_round: int
:param grid_search_samples_per_fold: number of samples to use
for each grid search fold
:type grid_search_samples_per_fold: int
:param non_nlp_features: set of non-NLP features to add into the
feature dictionaries
:type non_nlp_features: set
:param prediction_label: feature to predict
:type prediction_label: str
        :param output_path: path for output reports, etc.
        :type output_path: str
        :param objective: objective function to use in ranking the runs;
                          if left unspecified, the objective will be
                          decided in `GridSearchCV` and will be either
                          accuracy for classification or r2 for
                          regression
        :type objective: str or None
:param data_sampling: how the data should be sampled (i.e.,
either 'even' or 'stratified')
:type data_sampling: str
:param grid_search_folds: number of grid search folds to use
(default: 5)
:type grid_search_folds: int
:param hashed_features: use FeatureHasher in place of
DictVectorizer and use the given number
of features (must be positive number or
0, which will set it to the default
number of features for feature hashing,
2^18)
:type hashed_features: int
:param nlp_features: include NLP features (default: True)
:type nlp_features: bool
:param bin_ranges: list of tuples representing the maximum and
minimum values corresponding to bins (for
splitting up the distribution of prediction
label values)
:type bin_ranges: list or None
:param lognormal: transform raw label values using `ln` (default:
False)
:type lognormal: bool
:param power_transform: power by which to transform raw label
                                values (default: None)
:type power_transform: float or None
:param majority_baseline: evaluate a majority baseline model
:type majority_baseline: bool
:param rescale: whether or not to rescale the predicted values
based on the input value distribution (defaults
to True, but set to False if this is a
classification experiment)
:type rescale: bool
:param feature_selection_percentile: use `chi2`-based
`SelectPercentile` feature
selection to retain the
given percentage of
features, i.e., a value in
(0.0, 1.0] (defaults to 1.0
to forego feature selection
altogether)
:type feature_selection_percentile: float
        :param n_jobs: value of `n_jobs` parameter, which is passed into
the learners (where applicable)
:type n_jobs: int
:returns: instance of `CVConfig` class
:rtype: CVConfig
:raises SchemaError, ValueError: if the input parameters result
in conflicts or are invalid
"""
        # Get dictionary of parameters (but remove "self" since that
# doesn't need to be validated and remove values set to None
# since they will be dealt with automatically)
params = dict(locals())
del params['self']
for param in list(params):
if params[param] is None:
del params[param]
# Schema
exp_schema = Schema(
{'db': Collection,
'games': And(set, lambda x: x.issubset(VALID_GAMES)),
'learners': And([str],
lambda learners: all(learner in LEARNER_DICT_KEYS
for learner in learners)),
'param_grids': [[{str: list}]],
'training_rounds': And(int, lambda x: x > 1),
'training_samples_per_round': And(int, lambda x: x > 0),
'grid_search_samples_per_fold': And(int, lambda x: x > 1),
'non_nlp_features': And({str}, lambda x: LABELS.issuperset(x)),
'prediction_label':
And(str,
lambda x: x in LABELS and not x in params['non_nlp_features']),
             'output_path': And(str, lambda x: isdir(x)),
Default('objective', default=None): lambda x: x in OBJ_FUNC_ABBRS_DICT,
Default('data_sampling', default='even'):
And(str, lambda x: x in ex.ExperimentalData.sampling_options),
Default('grid_search_folds', default=5): And(int, lambda x: x > 1),
Default('hashed_features', default=None):
Or(None,
lambda x: not isinstance(x, bool)
and isinstance(x, int)
and x > -1),
Default('nlp_features', default=True): bool,
Default('bin_ranges', default=None):
Or(None,
And([(float, float)],
lambda x: validate_bin_ranges(x) is None)),
Default('lognormal', default=False): bool,
Default('power_transform', default=None):
Or(None, And(float, lambda x: x != 0.0)),
Default('majority_baseline', default=True): bool,
Default('rescale', default=True): bool,
Default('feature_selection_percentile', default=1.0):
And(float, lambda x: x > 0.0 and x <= 1.0),
Default('n_jobs', default=1): And(int, lambda x: x > 0)
}
)
# Validate the schema
try:
self.validated = exp_schema.validate(params)
except (ValueError, SchemaError) as e:
msg = ('The set of passed-in parameters was not able to be '
'validated and/or the bin ranges values, if specified, were'
' not able to be validated.')
logerr('{0}:\n\n{1}'.format(msg, e))
raise e
# Set up the experiment
self._further_validate_and_setup()
def _further_validate_and_setup(self) -> None:
"""
Further validate the experiment's configuration settings and set
up certain configuration settings, such as setting the total
number of hashed features to use, etc.
:returns: None
:rtype: None
"""
# Make sure parameters make sense/are valid
if len(self.validated['learners']) != len(self.validated['param_grids']):
raise SchemaError(autos=None,
                              errors='The lists of learners and parameter '
'grids must be the same size.')
if (self.validated['hashed_features'] is not None
and self.validated['hashed_features'] == 0):
self.validated['hashed_features'] = self._n_features_feature_hashing
if self.validated['lognormal'] and self.validated['power_transform']:
raise SchemaError(autos=None,
errors='Both "lognormal" and "power_transform" '
'were set simultaneously.')
if len(self.validated['learners']) != len(self.validated['param_grids']):
raise SchemaError(autos=None,
errors='The "learners" and "param_grids" '
'parameters were both set and the '
'lengths of the lists are unequal.')
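# A minimal, hypothetical configuration sketch (every value below is illustrative
# rather than taken from project documentation; `db` is a pymongo Collection):
#
#     cfg = CVConfig(db=db,
#                    games={'Dota_2'},                 # any subset of VALID_GAMES
#                    learners=['mbkm'],                # abbreviations from LEARNER_DICT
#                    param_grids=[[{'n_clusters': [2, 3]}]],
#                    training_rounds=5,
#                    training_samples_per_round=100,
#                    grid_search_samples_per_fold=50,
#                    non_nlp_features=set(),
#                    prediction_label='total_game_hours',
#                    output_path='output',
#                    objective='qwk')
#     experiments = RunCVExperiments(cfg)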
class RunCVExperiments(object):
"""
Class for conducting sets of incremental cross-validation
experiments.
"""
# Constants
default_cursor_batch_size_ = 50
requires_classes_kwarg_ = frozenset({'BernoulliNB',
'MultinomialNB',
'Perceptron',
'SGDClassifier',
'PassiveAggressiveClassifier'})
def __init__(self, config: CVConfig) -> 'RunCVExperiments':
"""
Initialize object.
:param config: an `CVConfig` instance containing configuration
options relating to the experiment, etc.
:type config: CVConfig
"""
# Experiment configuration settings
self.cfg_ = pd.Series(config.validated)
cfg = self.cfg_
# Games
if not cfg.games:
raise ValueError('The set of games must be greater than zero!')
self.games_string_ = ', '.join(cfg.games)
# Output path and output file names/templates
self.stats_report_path_ = join(cfg.output_path, 'cv_stats.csv')
self.aggregated_stats_report_path_ = join(cfg.output_path,
'cv_stats_aggregated.csv')
self.model_weights_path_template_ = join(cfg.output_path,
'{0}_model_weights_{1}.csv')
self.model_path_template_ = join(cfg.output_path, '{0}_{1}.model')
if cfg.majority_baseline:
self.majority_baseline_report_path_ = join(cfg.output_path,
'maj_baseline_stats.csv')
if cfg.lognormal or cfg.power_transform:
self.transformation_string_ = ('ln' if cfg.lognormal
else 'x**{0}'.format(cfg.power_transform))
else:
self.transformation_string_ = 'None'
# Objective function
        if cfg.objective not in OBJ_FUNC_ABBRS_DICT:
raise ValueError('Unrecognized objective function used: {0}. '
'These are the available objective functions: {1}.'
.format(cfg.objective, OBJ_FUNC_ABBRS_STRING))
# Data-set- and database-related variables
self.batch_size_ = \
(cfg.training_samples_per_round
if cfg.training_samples_per_round < self.default_cursor_batch_size_
else self.default_cursor_batch_size_)
self.projection_ = {'_id': 0}
if not cfg.nlp_features:
self.projection_['nlp_features'] = 0
self.data_ = self._generate_experimental_data()
# Create and fit vectorizers with all grid search samples and
# training samples
self.train_ids_ = list(chain(*self.data_.training_set))
self.grid_search_ids_ = list(chain(*self.data_.grid_search_set))
self.gs_vec_ = self._make_vectorizer(self.grid_search_ids_,
hashed_features=cfg.hashed_features)
self.training_vec_ = self._make_vectorizer(self.train_ids_,
hashed_features=cfg.hashed_features)
# Learner-related variables
self.learners_ = [LEARNER_DICT[learner] for learner in cfg.learners]
self.learner_names_ = [LEARNER_ABBRS_DICT[learner] for learner
in cfg.learners]
self.cv_learners_ = {}
# Do grid search round
loginfo('Executing parameter grid search learning round...')
self.learner_gs_cv_params_ = self._do_grid_search_round()
# Do incremental learning experiments
loginfo('Incremental learning cross-validation experiments '
'initialized...')
self._do_training_cross_validation()
self.training_cv_aggregated_stats_ = \
ex.aggregate_cross_validation_experiments_stats(self.cv_learner_stats_)
# Generate a report with the results from the cross-validation
# experiments
self.generate_learning_reports()
# Generate statistics for the majority baseline model
if cfg.majority_baseline:
self._majority_baseline_stats = self._evaluate_majority_baseline_model()
def _resolve_objective_function(self) -> Scorer:
"""
Resolve value of parameter to be passed in to the `scoring`
parameter in `GridSearchCV`, which can be `None`, a string, or a
callable.
:returns: a value to pass into the `scoring` parameter in
`GridSearchCV`, which can be None to use the default,
a string value that represents one of the scoring
functions, or a custom scorer function (via
`make_scorer`)
:rtype: str, None, callable
"""
objective = self.cfg_.objective
if objective == 'accuracy':
return make_scorer(ex.accuracy_score_round_inputs)
if objective.startswith('precision'):
if objective.endswith('macro'):
return make_scorer(ex.precision_score_round_inputs,
average='macro')
elif objective.endswith('weighted'):
return make_scorer(ex.precision_score_round_inputs,
average='weighted')
if objective.startswith('f1'):
if objective.endswith('macro'):
return make_scorer(ex.f1_score_round_inputs,
average='macro')
elif objective.endswith('weighted'):
return make_scorer(ex.f1_score_round_inputs,
average='weighted')
elif objective.endswith('least_frequent'):
return make_scorer(ex.f1_score_least_frequent_round_inputs)
if objective == 'pearson_r':
return make_scorer(pearson)
if objective == 'spearman':
return make_scorer(spearman)
if objective == 'kendall_tau':
return make_scorer(kendall_tau)
if objective.startswith('uwk'):
if objective == 'uwk':
return make_scorer(ex.kappa_round_inputs)
return make_scorer(ex.kappa_round_inputs,
allow_off_by_one=True)
if objective.startswith('lwk'):
if objective == 'lwk':
return make_scorer(ex.kappa_round_inputs,
weights='linear')
return make_scorer(ex.kappa_round_inputs,
weights='linear',
allow_off_by_one=True)
if objective.startswith('qwk'):
if objective == 'qwk':
return make_scorer(ex.kappa_round_inputs,
weights='quadratic')
return make_scorer(ex.kappa_round_inputs,
weights='quadratic',
allow_off_by_one=True)
return objective
def _generate_experimental_data(self):
"""
Call `src.experiments.ExperimentalData` to generate a set of
data to be used for grid search, training, etc.
"""
loginfo('Extracting dataset...')
cfg = self.cfg_
return ex.ExperimentalData(db=cfg.db,
prediction_label=cfg.prediction_label,
games=cfg.games,
folds=cfg.training_rounds,
fold_size=cfg.training_samples_per_round,
grid_search_folds=cfg.grid_search_folds,
grid_search_fold_size=
cfg.grid_search_samples_per_fold,
sampling=cfg.data_sampling,
lognormal=cfg.lognormal,
power_transform=cfg.power_transform,
bin_ranges=cfg.bin_ranges,
batch_size=self.batch_size_)
def _make_vectorizer(self, ids: List[str],
hashed_features: Optional[int] = None,
batch_size: int = 20) -> Vectorizer:
"""
Make a vectorizer.
:param ids: a list of sample ID strings with which to fit the
vectorizer
:type ids: list
:param hashed_features: if feature hasing is being used, provide
the number of features to use;
otherwise, the value should be None
:type hashed_features: int or None
:param batch_size: value to use for each batch of data when
fitting the vectorizer (default: 20)
:type batch_size: int
:returns: a vectorizer, i.e., DictVectorizer or FeatureHasher
:rtype: Vectorizer
:raises ValueError: if the value of `hashed_features` is not
greater than zero or `ids` is empty
"""
if not ids:
raise ValueError('The "ids" parameter is empty.')
if hashed_features is not None:
if hashed_features < 1 or isinstance(hashed_features, float):
raise ValueError('The value of "hashed_features" should be a '
'positive integer, preferably a very large '
'integer.')
vec = FeatureHasher(n_features=hashed_features,
non_negative=True,
dtype=np.float32)
else:
vec = DictVectorizer(sparse=True, dtype=np.float32)
# Incrementally fit the vectorizer with one batch of data at a
# time
samples = self._generate_samples(ids, 'x')
while True:
batch = list(take(batch_size, samples))
if not batch: break
vec.fit(batch)
return vec
def _generate_samples(self, ids: List[str], key: Optional[str] = None) \
-> Iterable[Union[Dict[str, Any], str, Numeric]]:
"""
Generate feature dictionaries for the review samples in the
given cursor.
Provides a lower-memory way of fitting a vectorizer, for
example.
:param ids: list of ID strings
:type ids: list
:param key: yield only the value of the specified key (if a key
is specified), can be the following values: 'y',
'x', or 'id'
:type key: str or None
:yields: feature dictionary
:ytype: dict, str, int, float, etc.
"""
cfg = self.cfg_
for doc in ex.make_cursor(cfg.db,
projection=self.projection_,
batch_size=self.batch_size_,
id_strings=ids):
sample = ex.get_data_point(doc,
prediction_label=cfg.prediction_label,
nlp_features=cfg.nlp_features,
non_nlp_features=cfg.non_nlp_features,
lognormal=cfg.lognormal,
power_transform=cfg.power_transform,
bin_ranges=cfg.bin_ranges)
# Either yield the sample given the specified key or yield
# the whole sample (or, if the sample is equal to None,
# continue)
if not sample: continue
yield sample.get(key, sample)
def _vectorize_and_sparsify_data(self,
vec: Vectorizer,
ids: List[str],
batch_size: int = 50) \
-> sp.sparse.csr.csr_matrix:
"""
Vectorize and sparsify sample data pointed to by the input
sample IDs in batches.
:param vec: vectorizer
:type vec: DictVectorizer/FeatureHasher
:param ids: list of IDs of the the samples to use
:type ids: list
:param batch_size:
:type batch_size: int
:returns: sparse matrix
:rtype: sp.sparse.csr.csr_matrix
"""
X = []
samples = self._generate_samples(ids, 'x')
while True:
X_list = list(take(batch_size, samples))
if not X_list: break
X_part = vec.transform(X_list)
del X_list
X.append(X_part)
del X_part
return sp.sparse.csr_matrix(np.vstack([x.todense() for x in X]))
def _do_grid_search_round(self) -> Dict[str, Dict[str, Any]]:
"""
Do grid search round.
:returns: dictionary of learner names mapped to dictionaries
representing the `best_params_` resulting from each
run with `GridSearchCV` with each learner type
:rtype: dict
"""
cfg = self.cfg_
# Get the data to use, vectorizing the sample feature dictionaries
y_train = list(self._generate_samples(self.grid_search_ids_, 'y'))
X_train = self._vectorize_and_sparsify_data(self.gs_vec_,
self.grid_search_ids_)
# Feature selection
if cfg.feature_selection_percentile != 1.0:
loginfo('Removing {0}% of the features during grid search round...'
.format(100 - 100*cfg.feature_selection_percentile))
X_train = \
(SelectPercentile(chi2,
percentile=100*cfg.feature_selection_percentile)
.fit_transform(X_train, y_train))
# Make a `StratifiedKFold` object using the list of labels
# NOTE: This will effectively redistribute the samples in the
# various grid search folds, but it will maintain the
# distribution of labels. Furthermore, due to the use of the
# `RandomState` object, it should always happen in the exact
# same way.
prng = np.random.RandomState(12345)
gs_cv_folds_ = StratifiedKFold(y=y_train,
n_folds=self.data_.grid_search_folds,
shuffle=True,
random_state=prng)
# Iterate over the learners/parameter grids, executing the grid search
# cross-validation for each
loginfo('Doing a grid search cross-validation round with {0} folds for'
' each learner and each corresponding parameter grid.'
.format(self.data_.grid_search_folds))
n_jobs_learners = ['Perceptron', 'SGDClassifier',
'PassiveAggressiveClassifier']
learner_gs_cv_params_ = {}
for learner, learner_name, param_grids in zip(self.learners_,
self.learner_names_,
cfg.param_grids):
loginfo('Grid search cross-validation for {0}...'
.format(learner_name))
# If the learner is `MiniBatchKMeans`, set the `batch_size`
# parameter to the number of training samples
if learner_name == 'MiniBatchKMeans':
for param_grid in param_grids:
param_grid['batch_size'] = [len(y_train)]
# If learner is of any of the learner types in
# `n_jobs_learners`, add in the `n_jobs` parameter specified
# in the config (but only do so if that `n_jobs` value is
# greater than 1 since it won't matter because 1 is the
# default, anyway)
if cfg.n_jobs > 1:
if learner_name in n_jobs_learners:
for param_grid in param_grids:
param_grid['n_jobs'] = [cfg.n_jobs]
# Make `GridSearchCV` instance
folds_diff = cfg.grid_search_folds - self.data_.grid_search_folds
if (self.data_.grid_search_folds < 2
or folds_diff/cfg.grid_search_folds > 0.25):
msg = ('Either there weren\'t enough folds after collecting '
'data (via `ExperimentalData`) to do the grid search '
'round or the number of folds had to be reduced to such'
                       ' a degree that it would mean a +25% reduction in the '
'total number of folds used during the grid search '
'round.')
logerr(msg)
raise ValueError(msg)
gs_cv = GridSearchCV(learner(),
param_grids,
cv=gs_cv_folds_,
scoring=self._resolve_objective_function())
# Do the grid search cross-validation
gs_cv.fit(X_train, y_train)
learner_gs_cv_params_[learner_name] = gs_cv.best_params_
del gs_cv
del X_train
del y_train
return learner_gs_cv_params_
def _do_training_cross_validation(self) -> None:
"""
Do cross-validation with training data. Each train/test split
will represent an individual incremental learning experiment,
i.e., starting with the best estimator from the grid search
round, learn little by little from batches of training samples
and evaluate on the held-out partition of data.
:returns: None
:rtype: None
"""
cfg = self.cfg_
fit_kwargs = {'classes': list(self.data_.classes)}
# Store all of the samples used during cross-validation
self.y_training_set_all_ = list(self._generate_samples(self.train_ids_, 'y'))
# Initialize learner objects with the optimal set of parameters
# learned from the grid search round (one for each
# sub-experiment of the cross-validation round)
for learner, learner_name in zip(self.learners_, self.learner_names_):
self.cv_learners_[learner_name] = \
[learner(**self.learner_gs_cv_params_[learner_name])
for i in range(len(self.data_.training_set))]
# Make a list of empty lists corresponding to each estimator
# instance for each learner, which will be used to store the
# performance metrics for each cross-validation
# leave-one-fold-out sub-experiment
self.cv_learner_stats_ = [[] for _ in cfg.learners]
# Fit the `SelectPercentile` feature selector (if applicable)
if cfg.feature_selection_percentile != 1.0:
loginfo('Removing {0}% of the features during training round...'
.format(100 - 100*cfg.feature_selection_percentile))
feature_selector = \
(SelectPercentile(chi2,
percentile=100*cfg.feature_selection_percentile)
.fit(self._vectorize_and_sparsify_data(self.training_vec_,
self.train_ids_),
self.y_training_set_all_))
# For each fold of the training set, train on all of the other
# folds and evaluate on the one left out fold
for i, held_out_fold in enumerate(self.data_.training_set):
loginfo('Cross-validation sub-experiment #{0} in progress'
.format(i + 1))
# Use each training fold (except for the held-out set) to
# incrementally build up the model
training_folds = (self.data_.training_set[:i]
+ self.data_.training_set[i + 1:])
y_train_all = []
for j, training_fold in enumerate(training_folds):
# Get the training data
y_train = list(self._generate_samples(training_fold, 'y'))
y_train_all.extend(y_train)
X_train = self._vectorize_and_sparsify_data(self.training_vec_,
training_fold)
if cfg.feature_selection_percentile != 1.0:
X_train = feature_selector.transform(X_train)
# Iterate over the learners
for learner_name in self.learner_names_:
# Partially fit each estimator with the new training
# data (specifying the `classes` keyword argument if
# this is the first go-round and it's a learner that
# requires this to be specified initially)
(self.cv_learners_[learner_name][i]
.partial_fit(X_train,
y_train,
**fit_kwargs if not j and learner_name
in self.requires_classes_kwarg_
else {}))
# Get mean and standard deviation for actual values
y_train_all = np.array(y_train_all)
y_train_mean = y_train_all.mean()
y_train_std = y_train_all.std()
# Get test data
y_test = list(self._generate_samples(held_out_fold, 'y'))
X_test = self._vectorize_and_sparsify_data(self.training_vec_,
held_out_fold)
if cfg.feature_selection_percentile != 1.0:
X_test = feature_selector.transform(X_test)
# Make predictions with the modified estimators
for j, learner_name in enumerate(self.learner_names_):
                # Make predictions with the given estimator, rounding the
# predictions
y_test_preds = \
np.round(self.cv_learners_[learner_name][i].predict(X_test))
# Rescale the predicted values based on the
# mean/standard deviation of the actual values and
# fit the predicted values within the original scale
# (i.e., no predicted values should be outside the range
# of possible values)
y_test_preds_dict = \
ex.rescale_preds_and_fit_in_scale(y_test_preds,
self.data_.classes,
y_train_mean,
y_train_std)
if cfg.rescale:
y_test_preds = y_test_preds_dict['rescaled']
else:
y_test_preds = y_test_preds_dict['fitted_only']
# Evaluate the predictions and add to list of evaluation
# reports for each learner
(self.cv_learner_stats_[j]
.append(ex.evaluate_predictions_from_learning_round(
y_test=y_test,
y_test_preds=y_test_preds,
classes=self.data_.classes,
prediction_label=cfg.prediction_label,
non_nlp_features=cfg.non_nlp_features,
nlp_features=cfg.nlp_features,
learner=self.cv_learners_[learner_name][i],
learner_name=learner_name,
games=cfg.games,
test_games=cfg.games,
_round=i + 1,
iteration_rounds=self.data_.folds,
n_train_samples=len(y_train_all),
n_test_samples=len(held_out_fold),
rescaled=cfg.rescale,
transformation_string=self.transformation_string_,
bin_ranges=cfg.bin_ranges)))
def _get_majority_baseline(self) -> np.ndarray:
"""
Generate a majority baseline array of prediction labels.
:returns: array of prediction labels
:rtype: np.ndarray
"""
self._majority_label = max(set(self.y_training_set_all_),
key=self.y_training_set_all_.count)
return np.array([self._majority_label]*len(self.y_training_set_all_))
def _evaluate_majority_baseline_model(self) -> pd.Series:
"""
Evaluate the majority baseline model predictions.
:returns: a Series containing the majority label system's
performance metrics and attributes
:rtype: pd.Series
"""
cfg = self.cfg_
stats_dict = ex.compute_evaluation_metrics(self.y_training_set_all_,
self._get_majority_baseline(),
self.data_.classes)
stats_dict.update({'games' if len(cfg.games) > 1 else 'game':
self.games_string_
if VALID_GAMES.difference(cfg.games)
else 'all_games',
'prediction_label': cfg.prediction_label,
'majority_label': self._majority_label,
'learner': 'majority_baseline_model',
'transformation': self.transformation_string_})
if cfg.bin_ranges:
stats_dict.update({'bin_ranges': cfg.bin_ranges})
return pd.Series(stats_dict)
def generate_majority_baseline_report(self) -> None:
"""
Generate a CSV file reporting on the performance of the
majority baseline model.
:returns: None
:rtype: None
"""
self._majority_baseline_stats.to_csv(self.majority_baseline_report_path_)
def generate_learning_reports(self) -> None:
"""
Generate report for the cross-validation experiments.
:returns: None
:rtype: None
"""
# Generate a report consisting of the evaluation metrics for
# each sub-experiment comprising each cross-validation
# experiment for each learner
(pd.DataFrame(list(chain(*self.cv_learner_stats_)))
.to_csv(self.stats_report_path_,
index=False))
# Generate a report consisting of the aggregated evaluation
# metrics from each cross-validation experiment with each
# learner
(self.training_cv_aggregated_stats_
.to_csv(self.aggregated_stats_report_path_,
index=False))
def store_sorted_features(self) -> None:
"""
Store files with sorted lists of features and their associated
coefficients from each model.
:returns: None
:rtype: None
"""
makedirs(dirname(self.model_weights_path_template_), exist_ok=True)
# Generate feature weights files and a README.json providing
# the parameters corresponding to each set of feature weights
params_dict = {}
for learner_name in self.cv_learners_:
# Skip MiniBatchKMeans models
if learner_name == 'MiniBatchKMeans':
logdebug('Skipping MiniBatchKMeans learner instances since '
'coefficients can not be extracted from them.')
continue
for i, estimator in enumerate(self.cv_learners_[learner_name]):
# Get dataframe of the features/coefficients
try:
ex.print_model_weights(estimator,
learner_name,
self.data_.classes,
self.cfg_.games,
                                           self.training_vec_,
self.model_weights_path_template_
.format(learner_name, i + 1))
params_dict.setdefault(learner_name, {})
params_dict[learner_name][i] = estimator.get_params()
except ValueError:
logerr('Could not generate features/feature coefficients '
'dataframe for {0}...'.format(learner_name))
# Save parameters file also
if params_dict:
dump(params_dict,
open(join(dirname(self.model_weights_path_template_),
'model_params_readme.json'), 'w'),
indent=4)
def store_models(self) -> None:
"""
Save the learners to disk.
:returns: None
:rtype: None
"""
# Iterate over the learner types (for which there will be
# separate instances for each sub-experiment of the
# cross-validation experiment)
for learner_name in self.cv_learners_:
loginfo('Saving {0} model files to disk...'.format(learner_name))
for i, estimator in enumerate(self.cv_learners_[learner_name]):
loginfo('Saving {0} model file #{1}'.format(learner_name, i + 1))
joblib.dump(estimator,
self.model_path_template_.format(learner_name, i + 1))
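# A model file written by `store_models` can later be restored with joblib, e.g.
# (the path shown is illustrative): estimator = joblib.load('output/BernoulliNB_1.model')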
def main(argv=None):
parser = ArgumentParser(description='Run incremental learning '
'experiments.',
formatter_class=ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
_add_arg = parser.add_argument
_add_arg('--games',
help='Game(s) to use in experiments; or "all" to use data from '
'all games.',
type=str,
required=True)
_add_arg('--out_dir',
help='Directory in which to output data related to the results '
'of the conducted experiments.',
type=str,
required=True)
_add_arg('--train_rounds',
help='The maximum number of rounds of learning to conduct (the '
'number of rounds will necessarily be limited by the amount'
' of training data and the number of samples used per '
'round). Use "0" to do as many rounds as possible.',
type=int,
default=0)
_add_arg('--train_samples_per_round',
help='The maximum number of training samples to use in each '
'round.',
type=int,
default=100)
_add_arg('--grid_search_folds',
help='The maximum number of folds to use in the grid search '
'round.',
type=int,
default=5)
_add_arg('--grid_search_samples_per_fold',
help='The maximum number of training samples to use in each grid '
'search fold.',
type=int,
default=1000)
_add_arg('--prediction_label',
help='Label to predict.',
choices=LABELS,
default='total_game_hours')
_add_arg('--non_nlp_features',
help='Comma-separated list of non-NLP features to combine with '
'the NLP features in creating a model. Use "all" to use all'
' available features, "none" to use no non-NLP features. If'
' --only_non_nlp_features is used, NLP features will be '
'left out entirely.',
type=str,
default='none')
_add_arg('--only_non_nlp_features',
help="Don't use any NLP features.",
action='store_true',
default=False)
_add_arg('--data_sampling',
help="Method used for sampling the data.",
choices=ex.ExperimentalData.sampling_options,
default='even')
_add_arg('--learners',
help='Comma-separated list of learning algorithms to try. Refer '
'to list of learners above to find out which abbreviations '
'stand for which learners. Set of available learners: {0}. '
'Use "all" to include all available learners.'
.format(LEARNER_ABBRS_STRING),
type=str,
default='all')
_add_arg('--nbins',
help='Number of bins to split up the distribution of prediction '
'label values into. Use 0 (or don\'t specify) if the values'
' should not be collapsed into bins. Note: Only use this '
'option (and --bin_factor below) if the prediction labels '
'are numeric.',
type=int,
default=0)
_add_arg('--bin_factor',
help='Factor by which to multiply the size of each bin. Defaults'
' to 1.0 if --nbins is specified.',
type=float,
required=False)
_add_arg('--lognormal',
help='Transform raw label values with log before doing anything '
'else, whether it be binning the values or learning from '
'them.',
action='store_true',
default=False)
_add_arg('--power_transform',
help='Transform raw label values via `x**power` where `power` is'
' the value specified and `x` is the raw label value before'
' doing anything else, whether it be binning the values or '
'learning from them.',
type=float,
default=None)
_add_arg('--use_feature_hasher',
help='Use FeatureHasher to be more memory-efficient.',
action='store_true',
default=False)
_add_arg('--feature_selection_percentile',
help='Use `chi2`-based `SelectPercentile` feature selection with '
'the given percentage of features selected (where the '
'percentage falls in the range (0.0, 1.0]).',
type=float,
default=1.0)
_add_arg('--rescale_predictions',
help='Rescale prediction values based on the mean/standard '
'deviation of the input values and fit all predictions into '
'the expected scale. Don\'t use if the experiment involves '
'labels rather than numeric values.',
action='store_true',
default=False)
_add_arg('--objective',
help='Objective function to use in determining which learner/set'
' of parameters resulted in the best performance.',
choices=OBJ_FUNC_ABBRS_DICT.keys(),
default='qwk')
_add_arg('--n_jobs',
help='Value of "n_jobs" parameter to pass in to learners whose '
'tasks can be parallelized. Should be no more than the '
'number of cores (or virtual cores) for the machine that '
'this process is run on.',
type=int,
default=1)
_add_arg('--evaluate_maj_baseline',
help='Evaluate the majority baseline model.',
action='store_true',
default=False)
_add_arg('--save_best_features',
help='Get the best features from each model and write them out '
'to files.',
action='store_true',
default=False)
_add_arg('--save_model_files',
help='Save model files to disk.',
action='store_true',
default=False)
_add_arg('-dbhost', '--mongodb_host',
help='Host that the MongoDB server is running on.',
type=str,
default='localhost')
_add_arg('-dbport', '--mongodb_port',
help='Port that the MongoDB server is running on.',
type=int,
default=37017)
_add_arg('-log', '--log_file_path',
help='Path to log file. If no path is specified, then a "logs" '
'directory will be created within the directory specified '
'via the --out_dir argument and a log will automatically be '
'stored.',
type=str,
required=False)
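# Illustrative invocation sketch (not part of the original source): it shows
# how a subset of the options defined above might be combined on the command
# line. The module name "learn.py" is an assumption, and required options
# defined earlier in the file (e.g. the games/output-directory arguments) are
# omitted here.
#
#   python learn.py --prediction_label total_game_hours --learners all \
#       --nbins 5 --bin_factor 1.5 --objective qwk --n_jobs 4 \
#       -dbhost localhost -dbport 37017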
args = parser.parse_args()
# Command-line arguments and flags
games = parse_games_string(args.games)
train_rounds = args.train_rounds
train_samples_per_round = args.train_samples_per_round
grid_search_folds = args.grid_search_folds
grid_search_samples_per_fold = args.grid_search_samples_per_fold
prediction_label = args.prediction_label
non_nlp_features = parse_non_nlp_features_string(args.non_nlp_features,
prediction_label)
only_non_nlp_features = args.only_non_nlp_features
nbins = args.nbins
bin_factor = args.bin_factor
lognormal = args.lognormal
power_transform = args.power_transform
feature_hashing = args.use_feature_hasher
feature_selection_percentile = args.feature_selection_percentile
rescale_predictions = args.rescale_predictions
data_sampling = args.data_sampling
learners = parse_learners_string(args.learners)
host = args.mongodb_host
port = args.mongodb_port
objective = args.objective
n_jobs = args.n_jobs
evaluate_maj_baseline = args.evaluate_maj_baseline
save_best_features = args.save_best_features
save_model_files = args.save_model_files
# Validate the input arguments
if isfile(realpath(args.out_dir)):
raise FileExistsError('The specified output destination is the name '
'of a currently existing file.')
else:
output_path = realpath(args.out_dir)
if save_best_features:
if learners == ['mbkm']:
        loginfo('The specified set of learners does not work with the '
                'current way of extracting features from models; thus, '
                '--save_best_features will be ignored.')
save_best_features = False
if feature_hashing:
raise ValueError('The --save_best_features option cannot be used '
'in conjunction with the --use_feature_hasher '
'option.')
if args.log_file_path:
if isdir(realpath(args.log_file_path)):
raise FileExistsError('The specified log file path is the name of'
' a currently existing directory.')
else:
log_file_path = realpath(args.log_file_path)
else:
log_file_path = join(output_path, 'logs', 'learn.log')
log_dir = dirname(log_file_path)
if lognormal and power_transform:
raise ValueError('Both "lognormal" and "power_transform" were '
'specified simultaneously.')
# Output results files to output directory
makedirs(output_path, exist_ok=True)
makedirs(log_dir, exist_ok=True)
# Set up file handler
file_handler = logging.FileHandler(log_file_path)
file_handler.setLevel(logging_debug)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# Log a bunch of job attributes
loginfo('Output directory: {0}'.format(output_path))
loginfo('Game{0} to train/evaluate models on: {1}'
.format('s' if len(games) > 1 else '',
', '.join(games) if VALID_GAMES.difference(games)
else 'all games'))
loginfo('Maximum number of learning rounds to conduct: {0}'
.format(train_rounds))
loginfo('Maximum number of training samples to use in each round: {0}'
.format(train_samples_per_round))
loginfo('Maximum number of grid search folds to use during the grid search'
' round: {0}'.format(grid_search_folds))
loginfo('Maximum number of training samples to use in each grid search '
'fold: {0}'.format(grid_search_samples_per_fold))
loginfo('Prediction label: {0}'.format(prediction_label))
loginfo('Data sampling method: {0}'.format(data_sampling))
loginfo('Lognormal transformation: {0}'.format(lognormal))
loginfo('Power transformation: {0}'.format(power_transform))
loginfo('Non-NLP features to use: {0}'
.format(', '.join(non_nlp_features) if non_nlp_features else 'none'))
if only_non_nlp_features:
if not non_nlp_features:
raise ValueError('No features to train a model on since the '
'--only_non_nlp_features flag was used and the '
'set of non-NLP features is empty.')
loginfo('Leaving out all NLP features')
if nbins == 0:
if bin_factor:
raise ValueError('--bin_factor should not be specified if --nbins'
' is not specified or set to 0.')
bin_ranges = None
else:
if bin_factor and bin_factor <= 0:
raise ValueError('--bin_factor should be set to a positive, '
'non-zero value.')
elif not bin_factor:
bin_factor = 1.0
loginfo('Number of bins to split up the distribution of prediction '
'label values into: {}'.format(nbins))
loginfo("Factor by which to multiply each succeeding bin's size: {}"
.format(bin_factor))
if feature_hashing:
loginfo('Using feature hashing to increase memory efficiency')
if feature_selection_percentile == 1.0:
loginfo('Not doing feature selection.')
else:
if (feature_selection_percentile <= 0.0
or feature_selection_percentile > 1.0):
raise ValueError('Value in range (0.0, 1.0] expected for the '
'--feature_selection_percentile option.')
loginfo('Using chi2-based SelectPercentile feature selection with the '
'following percentage of features selected for use: {0}'
.format(100*feature_selection_percentile))
if rescale_predictions:
loginfo('Rescaling predicted values based on the mean/standard '
'deviation of the input values.')
loginfo('Learners: {0}'.format(', '.join([LEARNER_ABBRS_DICT[learner]
for learner in learners])))
loginfo('Using {0} as the objective function'.format(objective))
if n_jobs < 1:
msg = '--n_jobs must be greater than 0.'
logerr(msg)
raise ValueError(msg)
loginfo('Number of tasks to run in parallel during learner fitting (when '
'possible to run tasks in parallel): {0}'.format(n_jobs))
# Connect to running Mongo server
loginfo('MongoDB host: {0}'.format(host))
loginfo('MongoDB port: {0}'.format(port))
try:
db = connect_to_db(host=host, port=port)
except ConnectionFailure as e:
logerr('Unable to connect to MongoDB reviews collection.')
logerr(e)
raise e
# Check to see if the database has the proper index and, if not,
# index the database here
index_name = 'steam_id_number_1'
if index_name not in db.index_information():
    logdebug('Creating index on the "steam_id_number" key...')
    db.create_index([('steam_id_number', ASCENDING)])
if nbins:
# Get ranges of prediction label distribution bins given the
# number of bins and the factor by which they should be
# multiplied as the index increases
try:
bin_ranges = get_bin_ranges_helper(db,
games,
prediction_label,
nbins,
bin_factor,
lognormal=lognormal,
power_transform=power_transform)
except ValueError as e:
msg = ('Encountered a ValueError while computing the bin ranges '
'given {0} and {1} as the values for the number of bins and'
' the bin factor. This could be due to an unrecognized '
               'prediction label, which would cause no values to be found, '
               'which in turn would result in an empty array.'
.format(nbins, bin_factor))
logerr(msg)
raise e
if lognormal or power_transform:
transformation = ('lognormal' if lognormal
else 'x**{0}'.format(power_transform))
else:
transformation = None
loginfo('Bin ranges (nbins = {0}, bin_factor = {1}{2}): {3}'
.format(nbins,
bin_factor,
', {0} transformation'.format(transformation)
if transformation
else '',
bin_ranges))
# Do learning experiments
loginfo('Starting incremental learning experiments...')
learners = sorted(learners)
try:
cfg = CVConfig(
db=db,
games=games,
learners=learners,
param_grids=[find_default_param_grid(learner)
for learner in learners],
training_rounds=train_rounds,
training_samples_per_round=train_samples_per_round,
grid_search_samples_per_fold=grid_search_samples_per_fold,
non_nlp_features=non_nlp_features,
prediction_label=prediction_label,
output_path=output_path,
objective=objective,
data_sampling=data_sampling,
grid_search_folds=grid_search_folds,
hashed_features=0 if feature_hashing else None,
nlp_features=not only_non_nlp_features,
bin_ranges=bin_ranges,
lognormal=lognormal,
power_transform=power_transform,
majority_baseline=evaluate_maj_baseline,
rescale=rescale_predictions,
feature_selection_percentile=feature_selection_percentile,
n_jobs=n_jobs)
except (SchemaError, ValueError) as e:
logerr('Encountered an exception while instantiating the CVConfig '
'instance: {0}'.format(e))
raise e
try:
experiments = RunCVExperiments(cfg)
except ValueError as e:
logerr('Encountered an exception while instantiating the '
'RunCVExperiments instance: {0}'.format(e))
raise e
# Save the best-performing features
if save_best_features:
loginfo('Generating feature coefficient output files for each model '
'(after all learning rounds)...')
experiments.store_sorted_features()
# Save the model files
if save_model_files:
loginfo('Writing out model files for each model to disk...')
experiments.store_models()
# Generate evaluation report for the majority baseline model, if
# specified
if evaluate_maj_baseline:
loginfo('Generating report for the majority baseline model...')
loginfo('Majority label: {0}'.format(experiments._majority_label))
experiments.generate_majority_baseline_report()
loginfo('Complete.')
if __name__ == '__main__':
main()
|
mit
|
klusta-team/klustaviewa
|
klustaviewa/views/tests/test_similaritymatrixview.py
|
2
|
1396
|
"""Unit tests for correlation matrix view."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import numpy as np
import numpy.random as rnd
import pandas as pd
from klustaviewa.views.tests.mock_data import (setup, teardown, create_similarity_matrix,
nspikes, nclusters, nsamples, nchannels, fetdim, ncorrbins)
from kwiklib.dataio import KlustersLoader
from kwiklib.dataio.selection import select
from kwiklib.dataio.tools import check_dtype, check_shape
from klustaviewa import USERPREF
from klustaviewa.views import SimilarityMatrixView
from klustaviewa.views.tests.utils import show_view, get_data
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_similaritymatrixview():
data = get_data()
kwargs = {}
kwargs['similarity_matrix'] = create_similarity_matrix(nclusters)
kwargs['cluster_colors_full'] = data['cluster_colors_full']
kwargs['operators'] = [
lambda self: self.view.show_selection(5, 6),
lambda self: (self.close()
if USERPREF['test_auto_close'] != False else None),
]
# Show the view.
show_view(SimilarityMatrixView, **kwargs)
|
bsd-3-clause
|
jnishi/chainer
|
chainer/training/extensions/plot_report.py
|
2
|
6590
|
import json
from os import path
import warnings
import numpy
import six
from chainer import reporter
from chainer import serializer as serializer_module
from chainer.training import extension
from chainer.training import trigger as trigger_module
_available = None
def _try_import_matplotlib():
global matplotlib, _available
try:
import matplotlib # NOQA
_available = True
except (ImportError, TypeError):
_available = False
def _check_available():
if _available is None:
_try_import_matplotlib()
if not _available:
        warnings.warn('matplotlib is not installed in your environment, '
'so nothing will be plotted at this time. '
'Please install matplotlib to plot figures.\n\n'
' $ pip install matplotlib\n')
class PlotReport(extension.Extension):
"""Trainer extension to output plots.
    This extension accumulates the observations of the trainer into a
    :class:`~chainer.DictSummary` at a regular interval specified by a supplied
    trigger, and plots a graph using the accumulated values.
    Two triggers control this extension. One is the trigger that invokes the
    extension itself and therefore decides when observations are accumulated;
    it is set to ``1, 'iteration'`` by default. The other is the trigger that
    decides when to emit a data point. When the latter fires, this extension
    appends the mean of the accumulated values to the plot data, saves the
    figure to the output directory, and then creates a fresh summary object
    that is used until the next time it fires.
It also adds ``'epoch'`` and ``'iteration'`` entries to each result
dictionary, which are the epoch and iteration counts at the output.
.. warning::
If your environment needs to specify a backend of matplotlib
explicitly, please call ``matplotlib.use`` before calling
``trainer.run``. For example:
.. code-block:: python
import matplotlib
matplotlib.use('Agg')
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'))
trainer.run()
        Then, once any instance of this extension has been called,
        ``matplotlib.use`` will have no effect.
For the details, please see here:
https://matplotlib.org/faq/usage_faq.html#what-is-a-backend
Args:
y_keys (iterable of strs): Keys of values regarded as y. If this is
None, nothing is output to the graph.
x_key (str): Keys of values regarded as x. The default value is
'iteration'.
trigger: Trigger that decides when to aggregate the result and output
the values. This is distinct from the trigger of this extension
itself. If it is a tuple in the form ``<int>, 'epoch'`` or ``<int>,
'iteration'``, it is passed to :class:`IntervalTrigger`.
postprocess: Callback to postprocess the result dictionaries. Figure
object, Axes object, and all plot data are passed to this callback
in this order. This callback can modify the figure.
file_name (str): Name of the figure file under the output directory.
It can be a format string.
marker (str): The marker used to plot the graph. Default is ``'x'``. If
``None`` is given, it draws with no markers.
grid (bool): Set the axis grid on if True. Default is True.
"""
def __init__(self, y_keys, x_key='iteration', trigger=(1, 'epoch'),
postprocess=None, file_name='plot.png', marker='x',
grid=True):
_check_available()
self._x_key = x_key
if isinstance(y_keys, str):
y_keys = (y_keys,)
self._y_keys = y_keys
self._trigger = trigger_module.get_trigger(trigger)
self._file_name = file_name
self._marker = marker
self._grid = grid
self._postprocess = postprocess
self._init_summary()
self._data = {k: [] for k in y_keys}
@staticmethod
def available():
_check_available()
return _available
def __call__(self, trainer):
if self.available():
# Dynamically import pyplot to call matplotlib.use()
# after importing chainer.training.extensions
import matplotlib.pyplot as plt
else:
return
keys = self._y_keys
observation = trainer.observation
summary = self._summary
if keys is None:
summary.add(observation)
else:
summary.add({k: observation[k] for k in keys if k in observation})
if self._trigger(trainer):
stats = self._summary.compute_mean()
stats_cpu = {}
for name, value in six.iteritems(stats):
stats_cpu[name] = float(value) # copy to CPU
updater = trainer.updater
stats_cpu['epoch'] = updater.epoch
stats_cpu['iteration'] = updater.iteration
x = stats_cpu[self._x_key]
data = self._data
for k in keys:
if k in stats_cpu:
data[k].append((x, stats_cpu[k]))
f = plt.figure()
a = f.add_subplot(111)
a.set_xlabel(self._x_key)
if self._grid:
a.grid()
for k in keys:
xy = data[k]
if len(xy) == 0:
continue
xy = numpy.array(xy)
a.plot(xy[:, 0], xy[:, 1], marker=self._marker, label=k)
if a.has_data():
if self._postprocess is not None:
self._postprocess(f, a, summary)
l = a.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
f.savefig(path.join(trainer.out, self._file_name),
bbox_extra_artists=(l,), bbox_inches='tight')
plt.close()
self._init_summary()
def serialize(self, serializer):
if isinstance(serializer, serializer_module.Serializer):
serializer('_plot_{}'.format(self._file_name),
json.dumps(self._data))
else:
self._data = json.loads(
serializer('_plot_{}'.format(self._file_name), ''))
def _init_summary(self):
self._summary = reporter.DictSummary()
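# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): it shows how
# PlotReport is typically attached to a Trainer. The toy MNIST classifier and
# the 'result' output directory below are assumptions made only for this demo.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import chainer
    from chainer import training
    train, _ = chainer.datasets.get_mnist()
    model = chainer.links.Classifier(chainer.links.Linear(784, 10))
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    train_iter = chainer.iterators.SerialIterator(train, batch_size=128)
    updater = training.StandardUpdater(train_iter, optimizer)
    trainer = training.Trainer(updater, (1, 'epoch'), out='result')
    # Accumulate 'main/loss' every iteration; write result/loss.png each epoch.
    trainer.extend(PlotReport(['main/loss'], 'epoch', file_name='loss.png'))
    trainer.run()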
|
mit
|
ankurankan/scikit-learn
|
benchmarks/bench_covertype.py
|
14
|
7233
|
"""
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
if "random_state" in estimator_params:
estimator.set_params(random_state=args["random_seed"])
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
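# Example invocation (illustrative; not part of the original benchmark):
#
#   python bench_covertype.py --classifiers liblinear SGD CART RandomForest \
#       --n-jobs 4 --order C --random-seed 13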
|
bsd-3-clause
|
laxmandhulipala/PWSA-Star
|
test/grid/other/run_pwsa.py
|
3
|
5121
|
#!/usr/bin/python
import sys
import os
import subprocess
import random
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
# call the pwsa_main.opt binary (pastar_int is not invoked in this script), e.g.:
# ./pwsa_main.opt -D 10000 -K 10000 -graph maps/maze512-4-0.map -srcY 228 -srcX 417 -dstY 308 -dstX 28 -proc 4 -isGrid 1 -useEuc 1
# scen files look like: 17 2 maps/mazes/maze512-4-0.map 512 512 112 168 108 162 8.24264
runs = {}
pathQuality = {}
procs = [1,2,3,4,6,8,12,16,20,24,32]
random.seed(42)
D = 1000
K = 1000
def save(path, ext='png', close=True, verbose=True):
directory = os.path.split(path)[0]
filename = "%s.%s" % (os.path.split(path)[1], ext)
if directory == '':
directory = '.'
if not os.path.exists(directory):
os.makedirs(directory)
savepath = os.path.join(directory, filename)
if verbose:
print("Saving figure to '%s'..." % savepath),
plt.savefig(savepath)
if close:
plt.close()
if verbose:
print("Done")
def runTest(map_name, srcX, srcY, dstX, dstY, p):
args = ("./pwsa_main.opt", "-D", str(D), "-K", str(K),
"-graph", map_name, "-srcX", str(srcX),
"-srcY", str(srcY), "-dstX", str(dstX),
"-dstY", str(dstY), "-isGrid", str(1),
"-useEuc", str(1), '-proc', str(p))
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
popen.wait()
output = popen.stdout.read()
output = output.split('\n')
if map_name not in runs:
runs[map_name] = {}
pathQuality[map_name] = {}
if p not in runs[map_name]:
runs[map_name][p] = []
queryStr = str(srcX) + "|" + str(srcY) + "|" + str(dstX) + "|" + str(dstY)
if queryStr not in pathQuality[map_name]:
pathQuality[map_name][queryStr] = []
print(output)
n = int(output[0].split('=')[1])
expanded = int(output[1].split('=')[1])
pathLength = int(output[2].split('=')[1])
execTime = float(output[3].split()[1])
util = float(output[4].split()[1])
runs[map_name][p] += [(n, expanded, pathLength, execTime, util, queryStr)]
def runTests(map_name, srcX, srcY, dstX, dstY, dist):
# Try a couple of variations of (D, K) with all proc specs
# TODO: get rid of this hackiness
if (random.random() > 0.1 or dist < 600):
return
for p in procs:
# running through ~10k scenarios. Can sample a bit to cut-down time.
runTest(map_name, srcX, srcY, dstX, dstY, p)
scen_name = sys.argv[1]
with open(scen_name) as scen:
lines = [line.rstrip('\n') for line in scen]
lines = lines[1:]
for line in lines:
print(line)
l = line.split()
map_name = l[1]
srcX = int(l[4])
srcY = int(l[5])
dstX = int(l[6])
dstY = int(l[7])
dist = float(l[8])
runTests(map_name, srcX, srcY, dstX, dstY, dist)
def graph(formula, x_range):
x = np.array(x_range)
y = formula(x)
plt.plot(x, y)
# speedup plots
for map_name in runs:
mapName = map_name.split('/')[2]
print(runs[map_name])
p1Time = 0.0
times = []
for p in procs:
tups = runs[map_name][p]
tot = 0
for tup in tups:
n = tup[0]
exp = tup[1]
pl = tup[2]
execTime = tup[3]
util = tup[4]
tot += execTime
tot /= len(tups)
print("tot = ", len(tups))
times += [(p, tot)]
if (p == 1):
p1Time = tot
x = []
y = []
print(times)
for pair in times:
x += [pair[0]]
y += [p1Time / pair[1]]
plt.plot(x, y)
graph(lambda x: x, range(0, 32))
plt.title('Speedup : ' + mapName)
plt.ylabel('Speedup')
plt.xlabel('num_proc')
plt.show()
save("images/speedup_" + mapName, ext="png", close=True, verbose=True)
# path-quality plots
for map_name in runs:
mapName = map_name.split('/')[2]
print(runs[map_name])
x = []
y = []
maxDiv = 0.0
trueLength = 0.0
ourLength = 0.0
maxQuery = ""
queryMap = {}
for p in procs:
tups = runs[map_name][p]
procDivergence = []
for tup in tups:
pathLength = tup[2]
queryStr = tup[5]
if queryStr not in queryMap:
# p == 1
queryMap[queryStr] = pathLength
else:
divergence = (pathLength * 1.0) / queryMap[queryStr] # should always be >=
if (pathLength < queryMap[queryStr]):
print("you got serious problems. Truelen = ", queryMap[queryStr], "queryStr = ", queryStr, " you got ", pathLength)
sys.exit(-1)
if (divergence > maxDiv):
maxDiv = divergence
trueLength = queryMap[queryStr]
ourLength = pathLength
maxQuery = queryStr
procDivergence += [divergence]
        if (p > 1):
            avgDivergence = sum(procDivergence) * 1.0 / len(procDivergence)
            x += [p]
            y += [avgDivergence]
print(x)
print(y)
print("Max divergence on : " + maxQuery + " true=" + str(trueLength) + " ourLength = " + str(ourLength))
plt.plot(x, y)
plt.title('Average divergence : ' + mapName + " k = " + str(K) + " d = " + str(D))
plt.ylabel('length')
plt.xlabel('num_proc')
plt.show()
save("images/divergence_" + str(K) + "_" + str(D) + "_" + mapName, ext="png", close=True, verbose=True)
|
apache-2.0
|
grehx/spark-tk
|
regression-tests/sparktkregtests/testcases/models/collaborative_filtering_test.py
|
1
|
14075
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests collaborative filtering against a generated data set"""
import unittest
from sparktkregtests.lib import sparktk_test
class CollabFilterTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(CollabFilterTest, self).setUp()
ALS_dataset = self.get_file("collab_filtering.csv")
schema = [("user", str), ("product", str), ("rating", float)]
self.frame = self.context.frame.import_csv(ALS_dataset, schema=schema)
        # add integer columns for user and product, since the
        # collaborative filtering train API expects integer user/item ids
self.frame.add_columns(
lambda x: [x["user"][5:], x['product'][5:]],
[("user_int", int), ("item_int", int)])
self.base_frame = self.frame.to_pandas(self.frame.count())
self.old_frame = self.frame.copy()
        # Remove some baseline values; collaborative filtering needs
        # missing entries to have something to predict
self.frame.filter(lambda x: (x["item_int"]+x["user_int"]) % 4 > 0)
def test_collaborative_filtering_recommend(self):
"""Test collaborative filtering and recommend"""
model = self.context.models.recommendation \
.collaborative_filtering \
.train(self.frame, "user_int", "item_int", "rating", max_steps=15)
recommend = model.recommend(0, 40)
recommend_dict = {i['product']: i['rating'] for i in recommend}
for k, v in recommend_dict.iteritems():
self.assertAlmostEqual(
self.base_frame[
(self.base_frame["product"] == "item-"+str(k)) &
(self.base_frame['user'] == "user-0")]['rating'].values[0],
v, delta=3.0)
def test_collaborative_filtering_model_parameters(self):
"""Test collaborative filtering model parameters"""
model = self.context.models.recommendation \
.collaborative_filtering \
.train(self.frame,
"user_int",
"item_int",
"rating",
15,
0.7,
0.8,
3,
False,
7,
5,
8,
0.4)
self.assertEqual(model.source_column_name, "user_int")
self.assertEqual(model.dest_column_name, "item_int")
self.assertEqual(model.weight_column_name, "rating")
self.assertEqual(model.max_steps, 15)
self.assertEqual(model.regularization, 0.7)
self.assertEqual(model.alpha, 0.8)
self.assertEqual(model.num_factors, 3)
self.assertEqual(model.use_implicit, False)
self.assertEqual(model.num_user_blocks, 7)
self.assertEqual(model.num_item_block, 5)
self.assertEqual(model.checkpoint_iterations, 8)
self.assertEqual(model.target_rmse, 0.4)
def test_als_collaborative_filtering_many_steps(self):
""" Test collaborative filtering with many steps"""
model = self.context.models.recommendation \
.collaborative_filtering \
.train(self.frame, "user_int", "item_int", "rating", max_steps=125)
recommend = model.recommend(0, 40)
recommend_dict = {i['product']: i['rating'] for i in recommend}
for k, v in recommend_dict.iteritems():
self.assertAlmostEqual(
self.base_frame[
(self.base_frame["product"] == "item-"+str(k)) &
(self.base_frame['user'] == "user-0")]['rating'].values[0],
v, delta=3.0)
def test_collaborative_filtering_predict(self):
"""Test collaborative filtering and predict"""
model = self.context.models.recommendation \
.collaborative_filtering \
.train(self.frame, "user_int", "item_int", "rating", max_steps=15)
scores = model.predict(
self.old_frame, "user_int", "item_int")
pd_scores = scores.to_pandas(scores.count())
for _, i in pd_scores.iterrows():
item_val = "item-"+str(int(i['product']))
user_val = "user-"+str(int(i['user']))
self.assertAlmostEqual(
self.base_frame[
(self.base_frame["product"] == item_val) &
(self.base_frame['user'] == user_val)]['rating'].values[0],
i['rating'], delta=5.5)
def test_collaborative_filtering_invalid_user(self):
"""Test collaborative filtering train with invalid user"""
with self.assertRaisesRegexp(
Exception,
'requirement failed: column invalid_user was not found'):
self.context.models.recommendation \
.collaborative_filtering \
.train(self.frame, "invalid_user", "item_int", "rating")
def test_collaborative_filtering_invalid_item(self):
"""Test collaborative filtering train with invalid item"""
with self.assertRaisesRegexp(
Exception,
'requirement failed: column invalid_int was not found'):
self.context.models.recommendation \
.collaborative_filtering \
.train(self.frame, "user_int", "invalid_int", "rating")
def test_collaborative_filtering_invalid_rating(self):
"""Test collaborative filtering with invalid rating"""
with self.assertRaisesRegexp(
Exception,
'requirement failed: column invalid_rating was not found'):
self.context.models.recommendation \
.collaborative_filtering \
.train(self.frame, "user_int", "item_int", "invalid_rating")
def test_collaborative_filtering_invalid_rmse(self):
"""Test collaborative filtering with invalid target_rmse"""
with self.assertRaisesRegexp(
Exception,
'requirement failed: target RMSE must be a positive value'):
self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int",
"rating", target_rmse=-15.0)
def test_collaborative_filtering_invalid_num_item_blocks(self):
"""Test collaborative filtering with invalid num_item_blocks"""
with self.assertRaisesRegexp(
Exception,
'Found num_item_blocks = -15.'):
self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int",
"rating", num_item_blocks=-15)
def test_collaborative_filtering_invalid_num_user_blocks(self):
"""Test collaborative filtering with invalid num_user_blocks"""
with self.assertRaisesRegexp(
Exception,
'Found num_user_blocks = -15.'):
self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int",
"rating", num_user_blocks=-15)
def test_collaborative_filtering_invalid_checkpoint_iterations(self):
"""Test collaborative filtering with invalid checkpoint_iterations"""
with self.assertRaisesRegexp(
Exception,
'Found checkpoint_iterations = -15.'):
self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int",
"rating", checkpoint_iterations=-15)
def test_collaborative_filtering_invalid_max_steps(self):
"""Test collaborative filtering invalid max steps"""
with self.assertRaisesRegexp(
Exception,
'Found max_steps = -15. Expected non-negative integer.'):
self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int",
"rating", max_steps=-15)
def test_collaborative_filtering_invalid_regularization(self):
"""Test collaborative filtering with invalid regularization"""
with self.assertRaisesRegexp(
Exception,
'parameter must have a value between 0 and 1'):
self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int",
"rating", regularization=-1.0)
with self.assertRaisesRegexp(
Exception,
'parameter must have a value between 0 and 1'):
self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int",
"rating", regularization=41.0)
def test_collaborative_filtering_invalid_alpha(self):
"""Test collaborative filtering with invalid alpha"""
with self.assertRaisesRegexp(
Exception,
'\'alpha\' parameter must have a value between 0 and 1'):
self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int", "rating", alpha=-1.0)
with self.assertRaisesRegexp(
Exception, 'parameter must have a value between 0 and 1'):
self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int", "rating", alpha=41.0)
def test_collaborative_filtering_invalid_recommend_items(self):
"""Test collaborative filtering recommend invalid items"""
with self.assertRaisesRegexp(
Exception,
'Found number_of_recommendations = -10.'):
model = self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int", "rating")
model.recommend(0, -10)
def test_collaborative_filtering_invalid_recommend_value(self):
"""Test collaborative filtering invalid item"""
with self.assertRaisesRegexp(
Exception,
'requirement failed: No users found with id = 1000.'):
model = self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int", "rating")
model.recommend(1000, 10)
def test_collaborative_filtering_predict_frame_invalid_source(self):
"""Test collaborative filtering predict frame with invalid source"""
with self.assertRaisesRegexp(
Exception,
'requirement failed: column invalid_source was not found'):
model = self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int", "rating")
model.predict(
self.old_frame, "invalid_source", "item_int")
def test_collaborative_filtering_predict_frame_invalid_item(self):
"""Test collaborative filtering predict frame with invalid item"""
with self.assertRaisesRegexp(
Exception,
'requirement failed: column invalid_item was not found'):
model = self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int", "rating")
model.predict(
self.old_frame, "user_int", "invalid_item")
def test_collaborative_filtering_predict_frame_invalid_output_user(self):
"""Test collaborative filtering predict with invalid output user"""
with self.assertRaisesRegexp(
Exception, 'requirement failed: column name can\'t be empty'):
model = self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int", "rating")
model.predict(
self.old_frame, "user_int",
"item_int", output_user_column_name="")
def test_collaborative_filtering_predict_invalid_output_product(self):
"""Test collaborative filtering predict with invalid output product"""
with self.assertRaisesRegexp(
Exception, 'requirement failed: column name can\'t be empty'):
model = self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int", "rating")
model.predict(
self.old_frame, "user_int",
"item_int", output_product_column_name="")
def test_collaborative_filtering_predict_frame_invalid_output_rating(self):
"""Test collaborative filtering predict with invalid output rating"""
with self.assertRaisesRegexp(
Exception, 'requirement failed: column name can\'t be empty'):
model = self.context.models.recommendation \
.collaborative_filtering.train(
self.frame, "user_int", "item_int", "rating")
model.predict(
self.old_frame, "user_int",
"item_int", output_rating_column_name="")
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
pvlib/pvlib-python
|
pvlib/tests/test_tracking.py
|
1
|
22589
|
import numpy as np
from numpy import nan
import pandas as pd
import pytest
from numpy.testing import assert_allclose
import pvlib
from pvlib import tracking, pvsystem
from .conftest import DATA_DIR, assert_frame_equal
SINGLEAXIS_COL_ORDER = ['tracker_theta', 'aoi',
'surface_azimuth', 'surface_tilt']
def test_solar_noon():
index = pd.date_range(start='20180701T1200', freq='1s', periods=1)
apparent_zenith = pd.Series([10], index=index)
apparent_azimuth = pd.Series([180], index=index)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': 0, 'aoi': 10,
'surface_azimuth': 90, 'surface_tilt': 0},
index=index, dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_scalars():
apparent_zenith = 10
apparent_azimuth = 180
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
assert isinstance(tracker_data, dict)
expect = {'tracker_theta': 0, 'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0}
for k, v in expect.items():
assert np.isclose(tracker_data[k], v)
def test_arrays():
apparent_zenith = np.array([10])
apparent_azimuth = np.array([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
assert isinstance(tracker_data, dict)
expect = {'tracker_theta': 0, 'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0}
for k, v in expect.items():
assert_allclose(tracker_data[k], v, atol=1e-7)
def test_nans():
apparent_zenith = np.array([10, np.nan, 10])
apparent_azimuth = np.array([180, 180, np.nan])
with np.errstate(invalid='ignore'):
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = {'tracker_theta': np.array([0, nan, nan]),
'aoi': np.array([10, nan, nan]),
'surface_azimuth': np.array([90, nan, nan]),
'surface_tilt': np.array([0, nan, nan])}
for k, v in expect.items():
assert_allclose(tracker_data[k], v, atol=1e-7)
# repeat with Series because nans can differ
apparent_zenith = pd.Series(apparent_zenith)
apparent_azimuth = pd.Series(apparent_azimuth)
with np.errstate(invalid='ignore'):
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame(np.array(
[[ 0., 10., 90., 0.],
[nan, nan, nan, nan],
[nan, nan, nan, nan]]),
columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
assert_frame_equal(tracker_data, expect)
def test_arrays_multi():
apparent_zenith = np.array([[10, 10], [10, 10]])
apparent_azimuth = np.array([[180, 180], [180, 180]])
# singleaxis should fail for num dim > 1
with pytest.raises(ValueError):
tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
# uncomment if we ever get singleaxis to support num dim > 1 arrays
# assert isinstance(tracker_data, dict)
# expect = {'tracker_theta': np.full_like(apparent_zenith, 0),
# 'aoi': np.full_like(apparent_zenith, 10),
# 'surface_azimuth': np.full_like(apparent_zenith, 90),
# 'surface_tilt': np.full_like(apparent_zenith, 0)}
# for k, v in expect.items():
# assert_allclose(tracker_data[k], v)
def test_azimuth_north_south():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'tracker_theta': -60, 'aoi': 0,
'surface_azimuth': 90, 'surface_tilt': 60},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect['tracker_theta'] *= -1
assert_frame_equal(expect, tracker_data)
def test_max_angle():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=45, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 15, 'surface_azimuth': 90,
'surface_tilt': 45, 'tracker_theta': 45},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_backtrack():
apparent_zenith = pd.Series([80])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=False,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 80, 'tracker_theta': 80},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
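    # Where the expected values below come from (illustrative arithmetic, not
    # part of the original test): backtracking reduces the 80 degree rotation
    # by arccos(cos(80 deg) / gcr) = arccos(0.17365 * 3.5) ~= 52.57 degrees,
    # leaving a rotation of 80 - 52.57 ~= 27.43 degrees; because the
    # un-backtracked aoi was 0, the aoi now equals that 52.57 degree
    # correction.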
expect = pd.DataFrame({'aoi': 52.5716, 'surface_azimuth': 90,
'surface_tilt': 27.42833, 'tracker_theta': 27.4283},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_axis_tilt():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730,
'surface_tilt': 35.98741,
'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 47.6632, 'surface_azimuth': 50.96969,
'surface_tilt': 42.5152, 'tracker_theta': 31.6655},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_axis_azimuth():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 30, 'surface_azimuth': 180,
'surface_tilt': 0, 'tracker_theta': 0},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 180,
'surface_tilt': 30, 'tracker_theta': 30},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
def test_horizon_flat():
# GH 569
solar_azimuth = np.array([0, 180, 359])
solar_zenith = np.array([100, 45, 100])
solar_azimuth = pd.Series(solar_azimuth)
solar_zenith = pd.Series(solar_zenith)
# depending on platform and numpy versions this will generate
# RuntimeWarning: invalid value encountered in > < >=
out = tracking.singleaxis(solar_zenith, solar_azimuth, axis_tilt=0,
axis_azimuth=180, backtrack=False, max_angle=180)
expected = pd.DataFrame(np.array(
[[ nan, nan, nan, nan],
[ 0., 45., 270., 0.],
[ nan, nan, nan, nan]]),
columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
assert_frame_equal(out, expected)
def test_horizon_tilted():
# GH 569
solar_azimuth = np.array([0, 180, 359])
solar_zenith = np.full_like(solar_azimuth, 45)
solar_azimuth = pd.Series(solar_azimuth)
solar_zenith = pd.Series(solar_zenith)
out = tracking.singleaxis(solar_zenith, solar_azimuth, axis_tilt=90,
axis_azimuth=180, backtrack=False, max_angle=180)
expected = pd.DataFrame(np.array(
[[-180., 45., 0., 90.],
[ 0., 45., 180., 90.],
[ 179., 45., 359., 90.]]),
columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
assert_frame_equal(out, expected)
def test_low_sun_angles():
# GH 656, 824
result = tracking.singleaxis(
apparent_zenith=80, apparent_azimuth=338, axis_tilt=30,
axis_azimuth=180, max_angle=60, backtrack=True, gcr=0.35)
expected = {
'tracker_theta': np.array([60.0]),
'aoi': np.array([80.420987]),
'surface_azimuth': np.array([253.897886]),
'surface_tilt': np.array([64.341094])}
for k, v in result.items():
assert_allclose(expected[k], v)
def test_SingleAxisTracker_creation():
system = tracking.SingleAxisTracker(max_angle=45,
gcr=.25,
module='blah',
inverter='blarg')
assert system.max_angle == 45
assert system.gcr == .25
assert system.arrays[0].module == 'blah'
assert system.inverter == 'blarg'
def test_SingleAxisTracker_one_array_only():
system = tracking.SingleAxisTracker(
arrays=[pvsystem.Array(
module='foo',
surface_tilt=None,
surface_azimuth=None
)]
)
assert system.arrays[0].module == 'foo'
with pytest.raises(ValueError,
match="SingleAxisTracker does not support "
r"multiple arrays\."):
tracking.SingleAxisTracker(
arrays=[pvsystem.Array(module='foo'),
pvsystem.Array(module='bar')]
)
with pytest.raises(ValueError,
match="Array must not have surface_tilt "):
tracking.SingleAxisTracker(arrays=[pvsystem.Array(module='foo')])
with pytest.raises(ValueError,
match="Array must not have surface_tilt "):
tracking.SingleAxisTracker(
arrays=[pvsystem.Array(surface_azimuth=None)])
with pytest.raises(ValueError,
match="Array must not have surface_tilt "):
tracking.SingleAxisTracker(
arrays=[pvsystem.Array(surface_tilt=None)])
def test_SingleAxisTracker_tracking():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730,
'surface_tilt': 35.98741,
'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
# results calculated using PVsyst
pvsyst_solar_azimuth = 7.1609
pvsyst_solar_height = 27.315
pvsyst_axis_tilt = 20.
pvsyst_axis_azimuth = 20.
pvsyst_system = tracking.SingleAxisTracker(
max_angle=60., axis_tilt=pvsyst_axis_tilt,
axis_azimuth=180+pvsyst_axis_azimuth, backtrack=False)
    # the definition of azimuth is different from PVsyst
apparent_azimuth = pd.Series([180+pvsyst_solar_azimuth])
apparent_zenith = pd.Series([90-pvsyst_solar_height])
tracker_data = pvsyst_system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 41.07852, 'surface_azimuth': 180-18.432,
'surface_tilt': 24.92122,
'tracker_theta': -15.18391},
index=[0], dtype=np.float64)
expect = expect[SINGLEAXIS_COL_ORDER]
assert_frame_equal(expect, tracker_data)
# see test_irradiance for more thorough testing
def test_get_aoi():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
surface_tilt = np.array([30, 0])
surface_azimuth = np.array([90, 270])
solar_zenith = np.array([70, 10])
solar_azimuth = np.array([100, 180])
out = system.get_aoi(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
expected = np.array([40.632115, 10.])
assert_allclose(out, expected, atol=0.000001)
def test_get_irradiance():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
times = pd.date_range(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
# latitude=32, longitude=-111
solar_position = pd.DataFrame(np.array(
[[55.36421554, 55.38851771, 34.63578446, 34.61148229,
172.32003763, -3.44516534],
[96.50000401, 96.50000401, -6.50000401, -6.50000401,
246.91581654, -3.56292888]]),
columns=['apparent_zenith', 'zenith', 'apparent_elevation',
'elevation', 'azimuth', 'equation_of_time'],
index=times)
irrads = pd.DataFrame({'dni': [900, 0], 'ghi': [600, 0], 'dhi': [100, 0]},
index=times)
solar_zenith = solar_position['apparent_zenith']
solar_azimuth = solar_position['azimuth']
# invalid warnings already generated in horizon test above,
# no need to clutter test output here
with np.errstate(invalid='ignore'):
tracker_data = system.singleaxis(solar_zenith, solar_azimuth)
# some invalid values in irradiance.py. not our problem here
with np.errstate(invalid='ignore'):
irradiance = system.get_irradiance(tracker_data['surface_tilt'],
tracker_data['surface_azimuth'],
solar_zenith,
solar_azimuth,
irrads['dni'],
irrads['ghi'],
irrads['dhi'])
expected = pd.DataFrame(data=np.array(
[[961.80070, 815.94490, 145.85580, 135.32820, 10.52757492],
[nan, nan, nan, nan, nan]]),
columns=['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=times)
assert_frame_equal(irradiance, expected, check_less_precise=2)
def test_SingleAxisTracker___repr__():
system = tracking.SingleAxisTracker(
max_angle=45, gcr=.25, module='blah', inverter='blarg',
temperature_model_parameters={'a': -3.56})
expected = """SingleAxisTracker:
axis_tilt: 0
axis_azimuth: 0
max_angle: 45
backtrack: True
gcr: 0.25
cross_axis_tilt: 0.0
name: None
Array:
name: None
surface_tilt: None
surface_azimuth: None
module: blah
albedo: 0.25
racking_model: None
module_type: None
temperature_model_parameters: {'a': -3.56}
strings: 1
modules_per_string: 1
inverter: blarg"""
assert system.__repr__() == expected
def test_calc_axis_tilt():
# expected values
expected_axis_tilt = 2.239 # [degrees]
expected_side_slope = 9.86649274360294 # [degrees]
expected = DATA_DIR / 'singleaxis_tracker_wslope.csv'
expected = pd.read_csv(expected, index_col='timestamp', parse_dates=True)
# solar positions
starttime = '2017-01-01T00:30:00-0300'
stoptime = '2017-12-31T23:59:59-0300'
lat, lon = -27.597300, -48.549610
times = pd.DatetimeIndex(pd.date_range(starttime, stoptime, freq='H'))
solpos = pvlib.solarposition.get_solarposition(times, lat, lon)
# singleaxis tracker w/slope data
slope_azimuth, slope_tilt = 77.34, 10.1149
axis_azimuth = 0.0
max_angle = 75.0
# Note: GCR is relative to horizontal distance between rows
gcr = 0.33292759 # GCR = length / horizontal_pitch = 1.64 / 5 / cos(9.86)
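    # (worked out as a sanity check, not original code: cos(9.86 deg) ~= 0.9852,
    # so 1.64 / 5 / 0.9852 ~= 0.33292, matching the hard-coded value above; the
    # angle is approximately the cross-axis slope computed below as
    # cross_axis_tilt)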
# calculate tracker axis zenith
axis_tilt = tracking.calc_axis_tilt(
slope_azimuth, slope_tilt, axis_azimuth=axis_azimuth)
assert np.isclose(axis_tilt, expected_axis_tilt)
# calculate cross-axis tilt and relative rotation
cross_axis_tilt = tracking.calc_cross_axis_tilt(
slope_azimuth, slope_tilt, axis_azimuth, axis_tilt)
assert np.isclose(cross_axis_tilt, expected_side_slope)
sat = tracking.singleaxis(
solpos.apparent_zenith, solpos.azimuth, axis_tilt, axis_azimuth,
max_angle, backtrack=True, gcr=gcr, cross_axis_tilt=cross_axis_tilt)
np.testing.assert_allclose(
sat['tracker_theta'], expected['tracker_theta'], atol=1e-7)
np.testing.assert_allclose(sat['aoi'], expected['aoi'], atol=1e-7)
np.testing.assert_allclose(
sat['surface_azimuth'], expected['surface_azimuth'], atol=1e-7)
np.testing.assert_allclose(
sat['surface_tilt'], expected['surface_tilt'], atol=1e-7)
def test_slope_aware_backtracking():
"""
Test validation data set from https://www.nrel.gov/docs/fy20osti/76626.pdf
"""
expected_data = np.array(
[('2019-01-01T08:00-0500', 2.404287, 122.79177, -84.440, -10.899),
('2019-01-01T09:00-0500', 11.263058, 133.288729, -72.604, -25.747),
('2019-01-01T10:00-0500', 18.733558, 145.285552, -59.861, -59.861),
('2019-01-01T11:00-0500', 24.109076, 158.939435, -45.578, -45.578),
('2019-01-01T12:00-0500', 26.810735, 173.931802, -28.764, -28.764),
('2019-01-01T13:00-0500', 26.482495, 189.371536, -8.475, -8.475),
('2019-01-01T14:00-0500', 23.170447, 204.13681, 15.120, 15.120),
('2019-01-01T15:00-0500', 17.296785, 217.446538, 39.562, 39.562),
('2019-01-01T16:00-0500', 9.461862, 229.102218, 61.587, 32.339),
('2019-01-01T17:00-0500', 0.524817, 239.330401, 79.530, 5.490)],
dtype=[
('Time', '<M8[h]'), ('ApparentElevation', '<f8'),
('SolarAzimuth', '<f8'), ('TrueTracking', '<f8'),
('Backtracking', '<f8')])
expected_axis_tilt = 9.666
expected_slope_angle = -2.576
slope_azimuth, slope_tilt = 180.0, 10.0
axis_azimuth = 195.0
axis_tilt = tracking.calc_axis_tilt(
slope_azimuth, slope_tilt, axis_azimuth)
assert np.isclose(axis_tilt, expected_axis_tilt, rtol=1e-3, atol=1e-3)
cross_axis_tilt = tracking.calc_cross_axis_tilt(
slope_azimuth, slope_tilt, axis_azimuth, axis_tilt)
assert np.isclose(
cross_axis_tilt, expected_slope_angle, rtol=1e-3, atol=1e-3)
sat = tracking.singleaxis(
90.0-expected_data['ApparentElevation'], expected_data['SolarAzimuth'],
axis_tilt, axis_azimuth, max_angle=90.0, backtrack=True, gcr=0.5,
cross_axis_tilt=cross_axis_tilt)
np.testing.assert_allclose(
sat['tracker_theta'], expected_data['Backtracking'],
rtol=1e-3, atol=1e-3)
truetracking = tracking.singleaxis(
90.0-expected_data['ApparentElevation'], expected_data['SolarAzimuth'],
axis_tilt, axis_azimuth, max_angle=90.0, backtrack=False, gcr=0.5,
cross_axis_tilt=cross_axis_tilt)
np.testing.assert_allclose(
truetracking['tracker_theta'], expected_data['TrueTracking'],
rtol=1e-3, atol=1e-3)
|
bsd-3-clause
|
RapidApplicationDevelopment/tensorflow
|
tensorflow/examples/learn/text_classification.py
|
8
|
4925
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
features = tf.contrib.layers.bow_encoder(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return (
{'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
loss, train_op)
def rnn_model(features, target):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
  # This creates an embedding matrix of shape [n_words, EMBEDDING_SIZE] and
  # then maps the word indexes of the sequence into
  # [batch_size, sequence_length, EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
  # Split into a list of per-word embeddings, removing the document length dim.
  # word_list will be a list of tensors of shape [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.nn.rnn(cell, word_list, dtype=tf.float32)
  # Given the encoding of the RNN, take the encoding of the last step (i.e. the
  # hidden state of the last unit) and pass it as features for logistic
  # regression over the output classes.
target = tf.one_hot(target, 15, 1, 0)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return (
{'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
model_fn = bag_of_words_model
classifier = learn.Estimator(model_fn=model_fn)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true'
)
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/doc/mpl_examples/animation/old_animation/animate_decay_tk_blit.py
|
3
|
1362
|
from __future__ import print_function
import time, sys
import numpy as np
import matplotlib.pyplot as plt
def data_gen():
t = data_gen.t
data_gen.t += 0.05
return np.sin(2*np.pi*t) * np.exp(-t/10.)
data_gen.t = 0
fig = plt.figure()
ax = fig.add_subplot(111)
line, = ax.plot([], [], animated=True, lw=2)
ax.set_ylim(-1.1, 1.1)
ax.set_xlim(0, 5)
ax.grid()
xdata, ydata = [], []
def run(*args):
background = fig.canvas.copy_from_bbox(ax.bbox)
# for profiling
tstart = time.time()
while 1:
# restore the clean slate background
fig.canvas.restore_region(background)
# update the data
t = data_gen.t
y = data_gen()
xdata.append(t)
ydata.append(y)
xmin, xmax = ax.get_xlim()
if t>=xmax:
ax.set_xlim(xmin, 2*xmax)
fig.canvas.draw()
background = fig.canvas.copy_from_bbox(ax.bbox)
line.set_data(xdata, ydata)
# just draw the animated artist
ax.draw_artist(line)
# just redraw the axes rectangle
fig.canvas.blit(ax.bbox)
if run.cnt==1000:
# print the timing info and quit
print('FPS:' , 1000/(time.time()-tstart))
sys.exit()
run.cnt += 1
run.cnt = 0
manager = plt.get_current_fig_manager()
manager.window.after(100, run)
plt.show()
|
mit
|
zhangsh950618/graduation
|
analyse/visualization.py
|
1
|
2529
|
# -*- coding: utf-8 -*-
from analyse import sentiment
from dao import comment_dao, blog_dao
import re
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import numpy as np
from analyse.jieba_segmentation import JiebaSeg
import math
# s = sentiment.Sentiment()
#
# com_dao = comment_dao.CommentDao()
#
# comments = com_dao.search_all_comments_with_limit(1000)
# scores = []
# f = open('res.txt', 'w')
# for comment in comments:
# raw_comment_info = comment[3].encode('utf-8')
# comment_info = re.sub('(@\S*|\[.*\]|#.*#|秒拍视频|转发微博)', "", raw_comment_info)
# val = s.single_review_sentiment_score(comment_info)
# scores.append(val)
# f.write("得分:" + str(val) + "\n")
# f.write("原始:" + raw_comment_info + "\n")
# f.write("处理后:" + comment_info + "\n")
# f.write("\n")
# f.close()
# print "max = " + str(max(scores)), "min = " + str(min(scores))
# data = np.array(scores)
# data = (data - data.mean()) / (data.max() - data.min())
# bins = np.linspace(-1, 1, 20)
# plt.hist(data, bins=bins)
# plt.show()
# import re
# s = u""" 1
#
# as
# """
#
# print re.sub(u'(\s|\n|t)', u'', s)
# b_dao = blog_dao.BlogDao()
# blogs = b_dao.search_all_blogs_without_keyword()
# x = []
# y = []
# z = []
# for blog in blogs:
# x.append(int(blog[5]))
# y.append(int(blog[6]))
# z.append(int(blog[7]))
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# x = np.array(x)
# y = np.array(y)
# z = np.array(z)
# ax.scatter(x, y, z, c='r')  # plot the data points
# ax.set_xlim(0, 5000)
# ax.set_ylim(0, 5000)
# ax.set_zlim(0, 5000)
# ax.set_zlabel('thumbup')  # axis labels
# ax.set_ylabel('comment')
# ax.set_xlabel('forward')
# plt.show()
jieba_seg = JiebaSeg()
hot_blogs, cold_blogs = jieba_seg.get_hot_blogs(["郑爽"], 2500)
hx, hy, hz = [], [], []
cx, cy, cz = [], [], []
for blog in hot_blogs:
hx.append(int(blog[5]))
hy.append(int(blog[6]))
hz.append(int(blog[7]))
for blog in cold_blogs:
cx.append(int(blog[5]))
cy.append(int(blog[6]))
cz.append(int(blog[7]))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
hx = np.array(hx)
hy = np.array(hy)
hz = np.array(hz)
cx = np.array(cx)
cy = np.array(cy)
cz = np.array(cz)
ax.scatter(cx, cy, cz, c='b')  # plot the cold-blog data points
ax.scatter(hx, hy, hz, c='r')  # plot the hot-blog data points
ax.set_xlim(0, 5000)
ax.set_ylim(0, 5000)
ax.set_zlim(0, 5000)
ax.set_zlabel('thumbup')  # axis labels
ax.set_ylabel('comment')
ax.set_xlabel('forward')
plt.show()
|
apache-2.0
|
apache/spark
|
python/pyspark/pandas/tests/test_series_conversion.py
|
15
|
3303
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from distutils.version import LooseVersion
import pandas as pd
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class SeriesConversionTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pser(self):
return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
@property
def psser(self):
return ps.from_pandas(self.pser)
@unittest.skip("Pyperclip could not find a copy/paste mechanism for Linux.")
def test_to_clipboard(self):
pser = self.pser
psser = self.psser
self.assert_eq(psser.to_clipboard(), pser.to_clipboard())
self.assert_eq(psser.to_clipboard(excel=False), pser.to_clipboard(excel=False))
self.assert_eq(
psser.to_clipboard(sep=",", index=False), pser.to_clipboard(sep=",", index=False)
)
def test_to_latex(self):
pser = self.pser
psser = self.psser
self.assert_eq(psser.to_latex(), pser.to_latex())
self.assert_eq(psser.to_latex(col_space=2), pser.to_latex(col_space=2))
self.assert_eq(psser.to_latex(header=True), pser.to_latex(header=True))
self.assert_eq(psser.to_latex(index=False), pser.to_latex(index=False))
self.assert_eq(psser.to_latex(na_rep="-"), pser.to_latex(na_rep="-"))
self.assert_eq(psser.to_latex(float_format="%.1f"), pser.to_latex(float_format="%.1f"))
self.assert_eq(psser.to_latex(sparsify=False), pser.to_latex(sparsify=False))
self.assert_eq(psser.to_latex(index_names=False), pser.to_latex(index_names=False))
self.assert_eq(psser.to_latex(bold_rows=True), pser.to_latex(bold_rows=True))
        # Can't specify `encoding` without specifying `buf` as a filename in pandas >= 1.0.0
# https://github.com/pandas-dev/pandas/blob/master/pandas/io/formats/format.py#L492-L495
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
self.assert_eq(psser.to_latex(encoding="ascii"), pser.to_latex(encoding="ascii"))
self.assert_eq(psser.to_latex(decimal=","), pser.to_latex(decimal=","))
if __name__ == "__main__":
from pyspark.pandas.tests.test_series_conversion import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
dpshelio/sunpy
|
examples/map/map_rotation.py
|
1
|
1141
|
# -*- coding: utf-8 -*-
"""
==============
Rotating a Map
==============
How to rotate a map.
"""
import astropy.units as u
import matplotlib.pyplot as plt
import sunpy.map
import sunpy.data.sample
###############################################################################
# We start with the sample data
aia_map = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
##############################################################################
# `~sunpy.map.GenericMap` provides the `~sunpy.map.GenericMap.rotate` method
# which accepts an angle. This returns a rotated map and does not rotate in
# place. The data array size is expanded so that none of the original data is
# lost due to clipping. Note that subsequent rotations are not compounded.
# The map is only rotated by the specified amount from the original map's
# orientation.
aia_rotated = aia_map.rotate(angle=30 * u.deg)
###############################################################################
# Let's now plot the results.
fig = plt.figure()
ax = plt.subplot(projection=aia_rotated)
aia_rotated.plot()
aia_rotated.draw_limb()
aia_rotated.draw_grid()
plt.show()
|
bsd-2-clause
|
loli/sklearn-ensembletrees
|
examples/hashing_vs_dict_vectorizer.py
|
284
|
3265
|
"""
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
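# Illustrative sketch (added commentary, not part of the original example) of
# the two helpers above:
#
#   list(tokens("Hello, hello world!"))   # -> ['hello', 'hello', 'world']
#   token_freqs("Hello, hello world!")    # -> counts {'hello': 2, 'world': 1} (as a defaultdict)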
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
|
bsd-3-clause
|
ycaihua/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
28
|
10792
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
|
bsd-3-clause
|
patrikja/SyntheticPopulations
|
python/activity_assignment.py
|
1
|
11301
|
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
import copy
from common import *
# The following must be identical to the corresponding values in the generation
# script (if the files produced there are to be used).
survey_attributes_csv = 'survey_attributes.csv'
survey_activities_csv = 'survey_activities.csv'
synthetic_people_csv = 'synthetic_people.csv'
synthetic_activities_csv = 'synthetic_activities.csv'
attribute_names = ['attr_a', 'attr_b', 'attr_c']
activity_names = ['act0', 'act1', 'act2']
bin_names = [['a0','a1', 'a2'], ['b0', 'b1', 'b2'], ['c0', 'c1', 'c2']]
bin_names_flat =[val for sublist in bin_names for val in sublist[1:]]
n_attribute_names = len(attribute_names)
n_activity_names = len(activity_names)
n_bins = map(lambda e: len(e), bin_names)
n_bin_names_flat = len(bin_names_flat)
class Activity:
def __init__(self, name, starttime, duration, location_id):
self.name = name
self.starttime = starttime
self.duration = duration
self.location_id = location_id
def __repr__(self):
return self.name
#return 'Activity name: {0}. Start time: {1}. Duration: {2}.'.format(self.name, self.starttime, self.duration)
class Person:
def __init__(self, person_id, household_id, attributes, activities):
self.person_id = person_id
self.household_id = household_id #household this person belongs to
self.attributes = attributes #list with the bin value for each attribute
        # Array of ones and zeros in which every bin value, except the first,
        # of every attribute is represented. A one means that that bin value is
        # the matching one for this person. If all entries for an attribute are
        # zero, the first bin value is used.
# Example: [a0, b1, c2] transforms to [0, 0, 1, 0, 0, 1]:
# a1 a2 b1 b2 c1 c2
# [0, 0, 1, 0, 0, 1]
self.bins = np.zeros(n_bin_names_flat, dtype=np.int)
for attribute in attributes:
if attribute in bin_names_flat:
self.bins[bin_names_flat.index(attribute)] = 1
self.activities = activities
#Sum total time for each activity
self.survey_activity_times = np.zeros(n_activity_names, dtype=np.int)
for activity in activities:
self.survey_activity_times[activity_names.index(activity.name)] += activity.duration
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.person_id == other.person_id
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return 'Person id: {0}. Household id: {1}. Attributes: {2}. Activities: {3}'\
.format(self.person_id, self.household_id, self.attributes, self.activities)
# Fitted activity time is y = x*b, where y is a vector of times for different
# categories, x is a vector of ones and zeros, representing the presence of
# attributes (a 1 is added for the interception) and b is a matrix with the
    # linear coefficients (see the commented sketch after this class).
def assign_fitted_time(self, beta):
self.fitted_activity_times = np.matmul(beta, np.hstack((1, self.bins)))
    # Calculate the distance between two persons as the (Euclidean) distance
    # between their fitted activity time vectors.
    # TODO: Replace the Euclidean distance with the Mahalanobis distance
def distance(self, other_person):
return np.linalg.norm( self.fitted_activity_times -
other_person.fitted_activity_times)
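# Illustrative sketch (added commentary, not part of the original script):
# with 3 activities and 6 dummy-coded bins, beta has shape (3, 7); a person
# whose attributes are [a0, b1, c2], i.e. bins == [0, 0, 1, 0, 0, 1], gets
#
#   fitted_activity_times = beta @ [1, 0, 0, 1, 0, 0, 1]
#
# that is, the intercept column of beta plus the columns of the bins that
# are "on".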
class Household:
def __init__(self, household_id):
self.household_id = household_id
self.persons = []
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.household_id == other.household_id
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def addPerson(self, person):
self.persons.append(person)
    # The household-household distance is defined as follows:
    # For every person in one of the households, take the smallest distance
    # between it and any person in the other household.
    # The household distance is the largest of these per-person minima over
    # all persons in the household (see the commented sketch after this class).
def distance(self, other_household):
max_person_dist = 0
for my_person in self.persons:
min_person_dist = float("inf")
for other_person in other_household.persons:
min_person_dist = min(min_person_dist, my_person.distance(other_person))
max_person_dist = max(max_person_dist, min_person_dist)
return max_person_dist
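# Illustrative sketch (added commentary, not part of the original script):
# for households H1 = {p1, p2} and H2 = {q1, q2} with person-person distances
#   d(p1, q1) = 1, d(p1, q2) = 4, d(p2, q1) = 5, d(p2, q2) = 2
# the per-person minima are min(1, 4) = 1 and min(5, 2) = 2, so
# H1.distance(H2) = max(1, 2) = 2.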
#print pd.merge(pd.read_csv(survey_attributes_csv), pd.read_csv(survey_activities_csv), left_on='person_id', right_on='person_id')
# Read survey files and construct list of survey persons
survey_attributes_df = pd.read_csv(survey_attributes_csv)
survey_activities_df = pd.read_csv(survey_activities_csv)
# Add a dummy row so that the while-loop construction below does not run off the end
empty_row = pd.DataFrame(columns=survey_activities_df.columns.values.squeeze().tolist())
empty_row.set_value(len(survey_activities_df), 'person_id', -1)
empty_row.set_value(len(survey_activities_df), 'household_id', -1)
empty_row.set_value(len(survey_activities_df), 'activity_type', '')
empty_row.set_value(len(survey_activities_df), 'start_time', 0)
empty_row.set_value(len(survey_activities_df), 'duration', 0)
empty_row.set_value(len(survey_activities_df), 'location', 0)
survey_activities_df = survey_activities_df.append(empty_row)
survey_persons = []
activities = []
activity_row_no = 0
for index, attribute_row in survey_attributes_df.iterrows():
while survey_activities_df['person_id'].iloc[activity_row_no] < attribute_row['person_id']: activity_row_no += 1
activities = []
while survey_activities_df['person_id'].iloc[activity_row_no] == attribute_row['person_id']:
activities.append(Activity(survey_activities_df['activity_type'].iloc[activity_row_no],
survey_activities_df['start_time'].iloc[activity_row_no],
survey_activities_df['duration'].iloc[activity_row_no],
survey_activities_df['location'].iloc[activity_row_no]))
activity_row_no += 1
attributes = map(lambda a: attribute_row[a], attribute_names)
survey_persons.append(Person(attribute_row['person_id'],
attribute_row['household_id'],
attributes,
activities))
# Create list of survey households and associate survey persons with them
survey_households = []
for person in survey_persons:
hh_temp = Household(person.household_id)
if not hh_temp in survey_households:
survey_households.append(hh_temp)
for person in survey_persons:
survey_households[survey_households.index(Household(person.household_id))].addPerson(person)
# Read synthetic people file and construct list of synthetic persons. They have no activities.
synthetic_people_df = pd.read_csv(synthetic_people_csv)
synthetic_persons = []
for index, row in synthetic_people_df.iterrows():
attributes = map(lambda a: row[a], attribute_names)
synthetic_persons.append(Person(row['person_id'], row['household_id'], map(lambda a: row[a], attribute_names), []))
# Create list of synthetic households and associate synthetic persons with them
synthetic_households = []
for person in synthetic_persons:
hh_temp = Household(person.household_id)
if not hh_temp in synthetic_households:
synthetic_households.append(hh_temp)
for person in synthetic_persons:
synthetic_households[synthetic_households.index(Household(person.household_id))].addPerson(person)
# Create a dataframe with activity times and attributes. The attributes are
# represented as dummy variables by the vector of zeros and ones created above.
act_df = pd.DataFrame(columns=activity_names+bin_names_flat)
for person in survey_persons:
row = pd.DataFrame(columns=activity_names+bin_names_flat)
for activity_id in range(0, n_activity_names):
row.set_value(person.person_id, activity_names[activity_id], person.survey_activity_times[activity_id])
for bin_no in range(0, n_bin_names_flat):
row.set_value(person.person_id, bin_names_flat[bin_no], person.bins[bin_no])
act_df = act_df.append(row)
# TODO: This is needed to make the output of the fitting nice. WHY???
act_df=act_df.fillna(0)
# For each activity time, fit it as a function of the attributes.
beta = np.zeros((n_activity_names, n_bin_names_flat+1), dtype=float)
beta_row = 0
for activity in activity_names:
formula_str = activity + ' ~ '
for bin_name in bin_names_flat:
formula_str += bin_name + ' + '
formula_str = formula_str[0:-3]
# TODO: What is ols and is it good enough?
result = sm.ols(formula=formula_str, data=act_df).fit()
beta[beta_row][:] = result.params
beta_row += 1
# Assign fitted times to survey persons and synthetic persons
for person in survey_persons:
person.assign_fitted_time(beta)
for person in synthetic_persons:
person.assign_fitted_time(beta)
# For each synthetic household, find the survey household that it is closest to.
# Then, for each synthetic person in the synthetic household, find the survey
# person in the selected survey household that it is closest to and copy the
# survey person's schedule to the synthetic person
for synthetic_household in synthetic_households:
min_household_dist = float("inf")
closest_survey_household = Household(0)
for survey_household in survey_households:
if synthetic_household.distance(survey_household) < min_household_dist:
min_household_dist = synthetic_household.distance(survey_household)
closest_survey_household = survey_household
for synthetic_person in synthetic_household.persons:
min_person_dist = float("inf")
closest_survey_person = Person(0, 0, [], [])
for survey_person in closest_survey_household.persons:
if synthetic_person.distance(survey_person) < min_person_dist:
min_person_dist = synthetic_person.distance(survey_person)
closest_survey_person = survey_person
synthetic_person.activities = copy.deepcopy(closest_survey_person.activities)
#for person in survey_persons:
# print person
#for person in synthetic_persons:
# print person
# Create dataframe with schedules for synthetic persons and write to file.
synthetic_activities_df = pd.DataFrame(columns=['person_id',
'household_id',
'activity_type',
'start_time',
'duration',
'location'])
for person in synthetic_persons:
for activity in person.activities:
row = pd.DataFrame(columns=['person_id', 'household_id', 'activity_type',
'start_time', 'duration', 'location'])
row.set_value(0, 'person_id', person.person_id)
row.set_value(0, 'household_id', person.household_id)
row.set_value(0, 'activity_type', activity.name)
row.set_value(0, 'start_time', activity.starttime)
row.set_value(0, 'duration', activity.duration)
row.set_value(0, 'location', activity.location_id)
synthetic_activities_df = synthetic_activities_df.append(row)
synthetic_activities_df.to_csv(synthetic_activities_csv, index=False)
|
bsd-3-clause
|
ianmtaylor1/pacal
|
pacal/utils.py
|
1
|
22199
|
# PaCal - the probabilistic calculator
# Copyright (C) 2009 Szymon Jaroszewicz, Marcin Korzen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import types
from functools import partial
from numpy import array, arange, empty, cos, sin, abs
from numpy import pi, isnan, unique, diff
from numpy import hstack, maximum, isfinite
from numpy import isinf, log, exp, logspace, Inf
from numpy import finfo, double, isscalar, asfarray
from pylab import plot, loglog, show, semilogx, sqrt, figure
from pylab import real, ones_like
from numpy import zeros, zeros_like, sort
from numpy.linalg import eigvals
from numpy.fft.fftpack import fft, ifft
from numpy import real, concatenate, linspace, argmin
#from scipy.fftpack.basic import fft
from scipy.optimize import fmin_cg,fmin, fmin_tnc
from . import params
# safe infinity
try:
from numpy import Inf
except:
Inf = float('inf')
# function wrappers for avoiding lambdas
# wrap instancemethod .pdf in a partial function call for pickling
# only used for parallel execution
def _call_pdf(obj, x):
return obj.pdf(x)
def wrap_pdf(pdf):
if params.general.parallel:
return partial(_call_pdf, pdf.__self__)
return pdf
def combine_interpolation_nodes(oldXs, oldYs, newXs, newYs):
"""Combine old and new interpolation nodes in sorted order."""
XsYs_sorted = sorted(zip(list(oldXs) + list(newXs), list(oldYs) + list(newYs)))
Xs = array([t[0] for t in XsYs_sorted])
Ys = array([t[1] for t in XsYs_sorted])
return Xs, Ys
def combine_interpolation_nodes_fast(oldXs, oldYs, newXs, newYs):
"""Combine old and new interpolation nodes in sorted order."""
newsize = len(oldXs) + len(newXs)
combinedXs = empty(newsize)
combinedYs = empty(newsize)
combinedXs[::2] = oldXs
combinedXs[1::2] = newXs
combinedYs[::2] = oldYs
combinedYs[1::2] = newYs
return combinedXs, combinedYs
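# Illustrative sketch (added commentary, not part of the original module):
# the "fast" variant assumes the new nodes fall strictly between consecutive
# old ones, as happens when refining Chebyshev nodes from degree m to 2*m-1:
#
#   Xs, Ys = combine_interpolation_nodes_fast(cheb_nodes(3), old_ys,
#                                             incremental_cheb_nodes(5), new_ys)
#
# Here old_ys/new_ys are hypothetical value arrays; Xs comes back as the sorted
# degree-5 node set [-1, -1/sqrt(2), 0, 1/sqrt(2), 1].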
# Chebyshev related utilities
def cheb_nodes(n, a = -1, b = 1):
"""Chebyshev nodes for given degree n"""
apb = 0.5 * (a + b)
if n == 1:
return array([apb])
bma = 0.5 * (b - a)
cs = apb - bma * cos(arange(n) * pi / (n-1))
# ensure that endpoints are exact
cs[0] = a
cs[-1] = b
return cs
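# Illustrative sketch (added commentary, not part of the original module):
# for n = 3 the nodes are the interval endpoints plus the midpoint
# (up to floating-point rounding):
#
#   cheb_nodes(3)         # ~ [-1.0, 0.0, 1.0]
#   cheb_nodes(3, 0, 2)   # ~ [ 0.0, 1.0, 2.0]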
# Chebyshev nodes in logspace
def cheb_nodes_log(n, a = 1, b = 10):
"""Chebyshev nodes in logspace for given degree n"""
assert(0 < a < b)
cs = cos(arange(n) * pi / (n-1))
cs = exp(cs/2.0*log(b/a))*sqrt(a*b)
# ensure that endpoints are exact
cs[0] = a
cs[-1] = b
return cs
def chebspace(a, b, n, returnWeights=False):
"""Chebyshev nodes for given degree n"""
apb = 0.5 * (a + b)
bma = 0.5 * (b - a)
cs = apb - bma * cos(arange(n) * pi / (n-1))
# ensure that endpoints are exact
cs[0] = a
cs[-1] = b
if returnWeights:
weights = ones_like(cs)
weights[::2] = -1
weights[0] /= 2
weights[-1] /= 2
return cs, weights
else:
return cs
def chebspace1(a, b, n, returnWeights=False):
"""Chebyshev nodes for given degree n"""
apb = 0.5 * (a + b)
bma = 0.5 * (b - a)
cs = apb - bma * cos(arange(1, 2*n, 2) * pi / (2*n))
if returnWeights:
weights = ones_like(cs)
weights = sin(arange(1, 2 * n, 2) * pi / (2 * n))
weights[1::2] = -1 * weights[1::2]
return cs, weights
else:
return cs
def incremental_cheb_nodes(n, a = -1, b = 1):
"""Extra Chebyshev nodes added by moving from degree m to n=2*m-1"""
apb = 0.5 * (a + b)
bma = 0.5 * (b - a)
return apb - bma * cos(arange(1,n-1,2) * pi / (n-1))
def incremental_cheb_nodes_log(n, a = 1, b = 10):
"""Extra Chebyshev nodes in logspace added by moving from degree m to n=2*m-1"""
cs = cos(arange(1,n-1,2) * pi / (n-1))
return exp(cs/2.0*log(b/a))*sqrt(a*b)
def cheb_nodes1(n, a = -1, b = 1):
"""Chebyshev nodes of the first kind for given degree n.
These are roots of Cheb. polys of the 1st kind."""
apb = 0.5 * (a + b)
bma = 0.5 * (b - a)
return apb - bma * cos(arange(1, 2*n, 2) * pi / (2*n))
def incremental_cheb_nodes1(n, a = -1, b = 1):
"""Extra Chebyshev nodes added by moving from degree m to n=2*m-1"""
apb = 0.5 * (a + b)
bma = 0.5 * (b - a)
ind = arange(0, n)
return apb - bma * cos((2*ind[((ind % 3) != 1)] + 1)* pi / (2*n))
def chebt2(f):
"""chebyshev transformation, coefficients in expansion using
Chebyshev polynomials T_n(x), see chebfun for details"""
n = len(f)
oncircle = concatenate((f[-1::-1], f[1:-1]))
fftcoef = real(fft(oncircle))/(2*n-2)
#print n, len(fftcoef)
#print fftcoef[n-1:]
#print fftcoef[n-1:0:-1]
fftcoef[n-1:0:-1] += fftcoef[n-1:] # z+conj(z)
return fftcoef[n-1::-1]
#return c
def ichebt2(c):
"""inverse chebyshev transformation, values of function in Chebyshev
nodes of the second kind, see chebfun for details"""
n = len(c)
oncircle = concatenate(([c[-1]],c[-2:0:-1]/2, c[0:-1]/2));
v = real(ifft(oncircle));
f = (n-1)*concatenate(([2*v[0]], v[1:n-1]+v[-1:n-1:-1], [2*v[n-1]] ))
return f
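# Illustrative sketch (added commentary, not part of the original module):
# chebt2 and ichebt2 invert each other, so values sampled at second-kind
# Chebyshev nodes survive the round trip up to rounding:
#
#   xs = cheb_nodes(8)
#   vals = exp(xs)           # any smooth function sampled at the nodes
#   coeffs = chebt2(vals)    # coefficients c_k of sum_k c_k T_k(x)
#   back = ichebt2(coeffs)   # ~ vals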
def chebt1(f):
#TODO
"""chebyshev transformation, see chebfun"""
n = len(f)
oncircle = concatenate((f[-1::-1], f[1:-1]))
fftcoef = real(fft(oncircle))/(2*n-2)
return fftcoef[n-1::-1]
def ichebt1(c):
#TODO
"""inverse chebyshev transformation, see chebfun"""
n = len(c)
print("tam===", n)
oncircle = concatenate((c[-1::-1], c[1:-1]));
print("v=", oncircle, n)
v = real(ifft(oncircle));
print(v)
print(v[-2:n:-1])
print("|", v[1:-1])
    f = (n-1)*concatenate(([2*v[0]], v[-2:n:-1]+v[1:-1], [2*v[-1]]));  # NOTE: this function is still incomplete (see TODO above)
print("|", f)
return f
def cheb1companion(c):
"""s_n[f] = sum_{i=0}^n c_i T_i(x)"""
n = len(c)
CT = zeros((n-1, n-1))
CT[0,1] = 1
i=arange(1,n-2)
CT[i, i-1] = 0.5
CT[i, i+1] = 0.5
i=arange(0,n-1)
CT[-1, i] = -.5*c[0:-1]/c[-1]
CT[-1, -2] = CT[-1, -2] + .5
return CT
def chebroots(c):
return sort(eigvals(cheb1companion(c)))
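# Illustrative sketch (added commentary, not part of the original module):
# for c = [0, 0, 1], i.e. f(x) = T_2(x) = 2*x**2 - 1, the companion-matrix
# eigenvalues recover the roots +-1/sqrt(2):
#
#   chebroots(array([0., 0., 1.]))   # ~ [-0.7071, 0.7071]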
def epsunique(tab, eps = params.segments.unique_eps):
ub = unique(tab[isnan(tab)==False])
return ub[~isfinite(ub) | hstack((True, (diff(ub)/maximum(1,abs(ub[1:])))>eps))]
def estimateDegreeOfPole(f, x, pos = True, fromTo = None, N = 10, deriv = False, debug_plot = False):
if fromTo is None:
if x == 0:
fromTo = (-1,-10)
else:
# testing around nonzero singularities is less accurate
fromTo = (-1,-7)
ex = logspace(fromTo[0], fromTo[1], N)
if pos:
lx = x + ex
else:
lx = x - ex
y = abs(f(lx))
#if deriv:
# y -= min(y[isfinite(y)])
yi = log(y)
xi = log(abs(ex))
ind = isfinite(yi)
xi = xi[ind]
yi = yi[ind]
ri = yi[0:-1] - yi[1:]
di = abs(xi[1:]-xi[0:-1])
if debug_plot:
print(xi,yi, f(xi))
loglog(xi,yi)
if len(yi) > 1:
return ri[-1]/di[-1]
else:
return 0
def estimateAtInfExponent(f, x, pos = True, fromTo = None, N = 10, deriv = False, debug_plot = False):
if fromTo is None:
fromTo = (1,10)
ex = logspace(fromTo[0], fromTo[1], N)
if pos:
lx = ex
else:
lx = -ex
y = abs(f(lx))
#if deriv:
# y -= min(y[isfinite(y)])
yi = log(y)
xi = log(abs(ex))
ind = isfinite(yi)
xi = xi[ind]
yi = yi[ind]
ri = yi[0:-1] - yi[1:]
di = abs(xi[1:]-xi[0:-1])
if debug_plot:
print(xi,yi, f(xi))
loglog(xi,yi)
if len(yi) > 1:
return ri[-1]/di[-1]
else:
return 0
def testPole(f, x, pos = True, pole_eps = None, deriv = None, debug_info = None, **kwargs):
if pole_eps is None:
pole_eps = params.pole_detection.max_pole_exponent
if deriv is None:
deriv = params.pole_detection.derivative
if debug_info is None:
debug_info = params.segments.debug_info
deg = estimateDegreeOfPole(f, x, pos, deriv = deriv, **kwargs)
if deriv:
if (abs(deg) >= abs(pole_eps) and deg <= 1 - abs(pole_eps)) or (deg >= 1 + abs(pole_eps) and deg <= 2 - abs(pole_eps)) or deg>2:
#if (deg >= abs(pole_eps) and deg <= 1 - abs(pole_eps)) or (deg >= 1 + abs(pole_eps) and deg <= 2 - abs(pole_eps))or (deg >= 2 + abs(pole_eps) and deg <= 3 - abs(pole_eps)):
pole = True
else:
pole = False
else:
if deg >= pole_eps:
pole = False
else:
pole = True
if debug_info:
print("x={0}, deg={1}, pole={2} check_deriv={3}".format(x, deg, pole, deriv))
return pole
class convergence_monitor(object):
"""Monitor numerical convergence."""
def __init__(self, par = None):
# convergence parameters
if par is None:
par = params.convergence
self.abstol = par.abstol
self.reltol = par.reltol
self.min_quit_iter = par.min_quit_iter # the earliest iteration to quit early
self.min_quit_no_improvement = par.min_quit_no_improvement # quit early if no improvement for this # of steps
self.min_improvement_ratio = par.min_improvement_ratio # by what factor the error needs to improve
self.min_quit_iter = max(2, self.min_quit_iter)
self.ae_list = []
self.y_list = []
self.e_list = []
self.converged = False
self.n_no_improvement = 0 # for how many steps there was no improvement
self.last_good = 0 # last entry for which error decreased
def add(self, abserr, yest, extra_data = None):
self.ae_list.append(abserr)
self.y_list.append(yest)
self.e_list.append(extra_data)
def test_convergence(self):
yest = abs(self.y_list[-1])
ae = self.ae_list[-1]
step = len(self.ae_list)
tol = max(self.abstol, yest * self.reltol)
if ae <= tol:
self.converged = True
return True, "converged"
if len(self.ae_list) > 0:
if ae < self.min_improvement_ratio * self.ae_list[self.last_good]:
self.last_good = len(self.ae_list) - 1
self.n_no_improvement = 0
else:
self.n_no_improvement += 1
if step >= self.min_quit_iter:
if self.n_no_improvement >= self.min_quit_no_improvement:
return True, "diverged"
return False, "continue"
def get_best_result(self, err_decr = 0.75):
"""Return currently best result. A result is considered only
if its error is err_decr times better than previous best."""
if self.converged:
return self.y_list[-1], self.ae_list[-1], self.e_list[-1]
best_y = self.y_list[0]
best_ae = self.ae_list[0]
if best_y == 0:
best_re = finfo(double).max
else:
best_re = best_ae / abs(best_y)
best_e = self.e_list[0]
for i in range(1, len(self.ae_list)):
y = self.y_list[i]
ae = self.ae_list[i]
if y == 0:
re = finfo(double).max
else:
re = ae / abs(y)
if re < err_decr * best_re:
best_y = y
best_ae = ae
best_re = re
best_e = self.e_list[i]
return best_y, best_ae, best_e
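# Illustrative usage sketch (added commentary, not part of the original
# module); `estimate` below stands for a hypothetical routine returning a
# value and an absolute error estimate for a given number of nodes:
#
#   cm = convergence_monitor()
#   for n in (8, 16, 32, 64):
#       yest, abserr = estimate(n)
#       cm.add(abserr, yest)
#       done, status = cm.test_convergence()
#       if done:
#           break
#   best_y, best_abserr, _ = cm.get_best_result()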
def stepfun(x, shift = 0.0):
if isscalar(x):
if x < shift:
return 0.0
else:
return 1.0
else:
mask = (x >= 0.0)
y = zeros_like(asfarray(x))
y[mask] = 1.0
return y
# Root finding
try:
from scipy.optimize import ridder, brentq
have_scipy_opt = True
except:
have_scipy_opt = False
#have_scipy_opt = False
def findinv(fun, a = 0.0, b = 1.0, c = 0.5, **kwargs):
"""find solution of equation f(x)=c, on interval [a, b]"""
if have_scipy_opt:
# fix too low relative tolerance for brentq
if "rtol" in kwargs and kwargs["rtol"] < finfo(float).eps * 2:
kwargs["rtol"] = finfo(float).eps * 2
return brentq(lambda x : fun(x) - c, a, b, **kwargs)
else:
return bisect(lambda x : fun(x) - c, a, b, **kwargs)
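# Illustrative sketch (added commentary, not part of the original module):
# solving x**2 = 2 on [0, 2] via findinv returns approximately sqrt(2):
#
#   findinv(lambda x: x * x, a=0.0, b=2.0, c=2.0)   # ~ 1.41421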
# copied from scipy
def bisect(f, xa, xb, xtol = 10*finfo(double).eps, rtol = 2*finfo(double).eps, maxiter = 1000, args = ()):
tol = min(xtol, rtol*(abs(xa) + abs(xb))) # fix for long intervals
fa = f(xa, *args)
fb = f(xb, *args)
if fa*fb > 0: raise RuntimeError("Interval does not contain zero")
if fa == 0: return xa
if fb == 0: return xb
dm = xb - xa
for i in range(maxiter):
dm /= 2
xm = xa + dm
fm = f(xm, *args)
if fm*fa >= 0:
xa = xm
if fm == 0 or abs(dm) < tol:
return xm
print("WARNING: zero fidning did not converge")
return xm
def estimateTailExponent(f, fromTo = None, N =300, deriv = False, debug_plot = False, pos = True):
if fromTo is None:
fromTo = (1,100)
ex = logspace(fromTo[0], fromTo[1], N)
if pos:
lx = ex
xi = log(ex)
else:
lx = -ex
xi = -log(ex)
y = abs(f(lx))
ind = (y > 0)
xi = xi[ind]
yi = log(y[ind])
ri = yi[1:] - yi[0:-1]
di = abs(xi[1:]-xi[0:-1])
if debug_plot:
print(ri, di)
plot(xi,yi)
if len(yi) > 1:
ex = ri[-1]/di[-1]
if ex>50:
return Inf
else:
return ex
else:
return 0
def maxprob(pdf, x0, lub=None):
def fun(x):
#print x, lub
if lub is not None:
for i in range(len(x)):
if x[i]<lub[i][0]:
x[i]=lub[i][0]
if x[i]>lub[i][1]:
x[i]=lub[i][1]
f = -pdf(*[x[i] for i in range(len(x))])
#print x, f
return f
#return fmin_tnc(fun, x0,bounds=lub)
return fmin_cg(fun, x0, gtol=1e-14)
#return fmin(fun, x0)
def fmin2(fc, L, U, **kwargs):
xx = linspace(L,U,20)
y = [fc(x) for x in xx]
ind = argmin(y)
#xopt = fmin_cg(fc, xx[ind], maxiter=20, disp=0)
xopt = fmin(fc, xx[ind], maxiter=20, disp=0)
if xopt>U:
return U
if xopt<L:
return L
return xopt
try:
from math import lgamma
except:
from .gamma import lgamma
def binomial_coeff(n, k):
if k > n - k: # take advantage of symmetry
k = n - k
c = 1
for i in range(k):
c = c * (n - i)
c = c / (i + 1)
return c
def multinomial_coeff(n, ki=[]):
assert sum(ki)==n, "incorrect values n, k ({0}, {1})".format(n, ki)
c = 1
j=0
for k in ki:
for i in range(k):
c = c * (n - j)
c = c / (i + 1)
j += 1
return c
def taylor_coeff(fun, N):
"""From L. Trefethen, Ten digits algorithms """
zz = exp(2j*pi*(array(list(range(N))))/N)
c = fft(fun(zz))/N
return real(c)
_debug_fig = None
_debug_cancelled = False
def debug_plot(a, b, nodes, fs, coeffs):
global _debug_fig, _debug_cancelled
if _debug_cancelled:
return
if 'show' not in locals():
from pylab import axes, subplot, subplots_adjust, figure, draw, plot, axvline, xlim, title, waitforbuttonpress, gcf
from matplotlib.widgets import Button
if _debug_fig is None:
#curfig = gcf()
#print dir(curfig)
_debug_fig = figure()
ax = _debug_fig.add_subplot(111)
#subplots_adjust(bottom=0.15)
butax = axes([0.8, 0.015, 0.1, 0.04])
button = Button(butax, 'Debug', hovercolor='0.975')
def debug(event):
import pdb; pdb.set_trace()
button.on_clicked(debug)
_debug_fig.sca(ax)
draw()
#figure(curfig)
_debug_fig.gca().clear()
plot(nodes, fs, linewidth=5, figure = _debug_fig)
axvline(a, color="r", figure = _debug_fig)
axvline(b, color="r", figure = _debug_fig)
d = 0.05 * (b-a)
_debug_fig.gca().set_xlim(a-d, b+d)
title("press key in figure for next debugplot or close window to continue")
try:
while not _debug_cancelled and not _debug_fig.waitforbuttonpress(-1):
pass
except:
_debug_cancelled = True
def ordinal_ending(n):
if n == 1:
return "st"
if n == 2:
return "nd"
return "th"
def is_instance_method(obj):
"""Checks if an object is a bound method on an instance."""
if not isinstance(obj, types.MethodType):
return False # Not a method
if obj.__self__ is None:
return False # Method is not bound
if issubclass(obj.__self__.__class__, type) or obj.__self__.__class__ is type:
return False # Method is a classmethod
return True
def get_parmap():
if params.general.parallel:
if params.general.process_pool is None:
import multiprocessing
p = multiprocessing.current_process()
#print p.name
#import os; print os.getpid()
if p.name.startswith("Main"):
params.general.process_pool = multiprocessing.Pool(params.general.nprocs)
if params.general.process_pool is None:
raise RuntimeError("Process pool not initialized")
pmap = params.general.process_pool.map
else:
pmap = map
return pmap
if __name__ == "__main__":
from pacal import *
import time
import numpy.polynomial.chebyshev as ch
c = array([1, 2, 3, 1])
CT =cheb1companion(array([1, 2, 3, 1]))
print(CT)
print(chebroots(c))
print(ch.chebroots(c))
print(chebroots(c) - ch.chebroots(c))
0/0
#print taylor_coeff(lambda x:exp(x), 30)
N = NormalDistr()
fun = N.mgf()
#fun.plot()
t0 = time.time()
t_i = taylor_coeff(fun, 100)
print(time.time()-t0)
sil=1
t0 = time.time()
for i in range(50):
if i==0:
sil=1
else:
sil *= i
mi = N.moment(i, 0.0)
print(i, repr(mi), repr(t_i[i])*sil*2, repr(mi/sil/2), repr(t_i[i]), repr(mi/sil/2-t_i[i]));
print(time.time()-t0)
print(N.summary())
show()
0/0
print(binomial_coeff(10, 7))
print(multinomial_coeff(10, [3, 3, 4]))
print(multinomial_coeff(13, [7, 2, 4]))
print(multinomial_coeff(21, [9, 8, 4]))
0/0
from .standard_distr import *
from pylab import *
print(estimateTailExponent(LevyDistr(), pos = True))
L = LevyDistr()
L.summary()
A= UniformDistr() / UniformDistr()
# ChiSquareDistr(1) / ChiSquareDistr(1.1)
A.summary()
A.plot()
S =A
figure()
for i in linspace(1,10,10):
S_1 = S * 2
S = S + S
subplot(211)
(S/(2**(i))).plot(xmin=0,xmax=50)
print(i, end=' ')
(S/(2**(i))).summary()
subplot(212)
r = S.get_piecewise_pdf() - S_1.get_piecewise_pdf()
r.plot(xmin=0,xmax=50)
show()
0/0
#m = 3
#n = 2*m-1
#print cheb_nodes(m)
#print incremental_cheb_nodes(n)
#print cheb_nodes(n)
#print combine_interpolation_nodes(cheb_nodes(m),
# arange(m),
# incremental_cheb_nodes(n),
# arange(m-1))[0]
#print combine_interpolation_nodes_fast(cheb_nodes(m),
# arange(m),
# incremental_cheb_nodes(n),
# arange(m-1))[0]
#from pylab import plot, show, axvline, figure
#for i in xrange(2,5):
# print i, cheb_nodes1(i)
# plot(cheb_nodes1(i), [i]*i, "o")
# for x in cheb_nodes1(i):
# axvline(x)
#segf1 = Segment(0.0, 1.0, lambda x:(n+1)/(n) * x ** (1/n))
#segf2 = Segment(0.0, 2.0, lambda x:pi/2 * sqrt(1 - (x-1) ** 2))
#segf1 = Segment(0.0, 1.0, lambda x: exp(-1/x))
#segf2 = Segment(0.0, 0.5, lambda x:-1/log(x))
#figure()
#print estimateDegreeOfZero(lambda x: x**0.5, 0)
#n=7.0
#estimateDegreeOfZero(lambda x:(n+1)/(n) * x ** (1/n), 0)
#estimateDegreeOfZero(lambda x:pi/2 * sqrt(1 - (x-1) ** 2), 0)
#print estimateDegreeOfZero(lambda x: exp(-1/x), 0)
# estimateDegreeOfZero(lambda x: 1/(x+x**4), Inf)
# estimateDegreeOfZero(lambda x: exp(-x), Inf)
#print findinv(lambda x: 1/(1+exp(-x)), a=-1e300, b=1e300, c=0.5, rtol =1e-16, maxiter = 10000)
from numpy import ones_like, zeros_like
def _pole_test(f, x, pos = True, deriv = False):
return str(testPole(f, x, pos, deriv = deriv)) + " " + str(estimateDegreeOfPole(f, x, pos)) + " " + str(estimateDegreeOfPole(f, x, pos, deriv = True))
print("0,", _pole_test(lambda x: ones_like(x), 0))
print("0',", _pole_test(lambda x: ones_like(x), 0, deriv = True))
print("1,", _pole_test(lambda x: zeros_like(x), 0))
print("x,", _pole_test(lambda x: x, 0))
print("x',", _pole_test(lambda x: x, 0, deriv = True))
print("x**1.5,", _pole_test(lambda x: x**1.5, 0))
print("x**0.5,", _pole_test(lambda x: x**0.5, 0))
print("-log(x),", _pole_test(lambda x: -log(x), 0))
print("-log(sqrt(x)),", _pole_test(lambda x: -log(sqrt(x)), 0))
print("-log(-x),", _pole_test(lambda x: -log(-x), 0, pos = False))
print("1+x**0.5,", _pole_test(lambda x: 1+x**0.5, 0))
print("(1+x**0.5)',", _pole_test(lambda x: 1+x**0.5, 0, deriv = True))
print("x*log(x),", _pole_test(lambda x: x*log(x), 0))
print(_pole_test(lambda x: 1+(1*x+7)*x**-2.5, 0))
print(testPole(lambda x: 1.0/abs(2*x-1), 0.5, pos= False))
print(testPole(lambda x: 9.0*abs(2*x-1), 0.5, pos= True))
|
gpl-3.0
|
kushalbhola/MyStuff
|
Practice/PythonApplication/env/Lib/site-packages/pandas/core/indexes/frozen.py
|
2
|
5592
|
"""
frozen (immutable) data structures to support MultiIndexing
These are used for:
- .names (FrozenList)
- .levels & .codes (FrozenNDArray)
"""
import warnings
import numpy as np
from pandas.util._decorators import deprecate_kwarg
from pandas.core.dtypes.cast import coerce_indexer_dtype
from pandas.core.base import PandasObject
from pandas.io.formats.printing import pprint_thing
class FrozenList(PandasObject, list):
"""
    Container that doesn't allow setting items. Although a plain list is not
    hashable, this class defines ``__hash__`` so that instances can be used
    for lookups (e.g. as dictionary keys).
"""
# Side note: This has to be of type list. Otherwise,
# it messes up PyTables type checks.
def union(self, other):
"""
Returns a FrozenList with other concatenated to the end of self.
Parameters
----------
other : array-like
The array-like whose elements we are concatenating.
Returns
-------
        union : FrozenList
            The collection with the elements of `other` concatenated to the
            end of self.
"""
if isinstance(other, tuple):
other = list(other)
return type(self)(super().__add__(other))
def difference(self, other):
"""
Returns a FrozenList with elements from other removed from self.
Parameters
----------
other : array-like
            The array-like whose elements we are removing from self.
Returns
-------
diff : FrozenList
The collection difference between self and other.
"""
other = set(other)
temp = [x for x in self if x not in other]
return type(self)(temp)
# TODO: Consider deprecating these in favor of `union` (xref gh-15506)
__add__ = __iadd__ = union
# Python 2 compat
def __getslice__(self, i, j):
return self.__class__(super().__getslice__(i, j))
def __getitem__(self, n):
# Python 3 compat
if isinstance(n, slice):
return self.__class__(super().__getitem__(n))
return super().__getitem__(n)
def __radd__(self, other):
if isinstance(other, tuple):
other = list(other)
return self.__class__(other + list(self))
def __eq__(self, other):
if isinstance(other, (tuple, FrozenList)):
other = list(other)
return super().__eq__(other)
__req__ = __eq__
def __mul__(self, other):
return self.__class__(super().__mul__(other))
__imul__ = __mul__
def __reduce__(self):
return self.__class__, (list(self),)
def __hash__(self):
return hash(tuple(self))
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError(
"'%s' does not support mutable operations." % self.__class__.__name__
)
def __str__(self):
return pprint_thing(self, quote_strings=True, escape_chars=("\t", "\r", "\n"))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, str(self))
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
pop = append = extend = remove = sort = insert = _disabled
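# Illustrative usage sketch (added commentary, not part of the original
# module):
#
#   fl = FrozenList(["a", "b"])
#   fl.union(["c"])         # FrozenList(['a', 'b', 'c'])
#   fl.difference(["a"])    # FrozenList(['b'])
#   fl.append("c")          # raises TypeError (mutable operations are disabled)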
class FrozenNDArray(PandasObject, np.ndarray):
# no __array_finalize__ for now because no metadata
def __new__(cls, data, dtype=None, copy=False):
warnings.warn(
"\nFrozenNDArray is deprecated and will be removed in a "
"future version.\nPlease use `numpy.ndarray` instead.\n",
FutureWarning,
stacklevel=2,
)
if copy is None:
copy = not isinstance(data, FrozenNDArray)
res = np.array(data, dtype=dtype, copy=copy).view(cls)
return res
def _disabled(self, *args, **kwargs):
"""This method will not function because object is immutable."""
raise TypeError("'%s' does not support mutable operations." % self.__class__)
__setitem__ = __setslice__ = __delitem__ = __delslice__ = _disabled
put = itemset = fill = _disabled
def _shallow_copy(self):
return self.view()
def values(self):
"""returns *copy* of underlying array"""
arr = self.view(np.ndarray).copy()
return arr
def __repr__(self):
"""
Return a string representation for this object.
"""
prepr = pprint_thing(self, escape_chars=("\t", "\r", "\n"), quote_strings=True)
return "%s(%s, dtype='%s')" % (type(self).__name__, prepr, self.dtype)
@deprecate_kwarg(old_arg_name="v", new_arg_name="value")
def searchsorted(self, value, side="left", sorter=None):
"""
Find indices to insert `value` so as to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : Equivalent function.
"""
# We are much more performant if the searched
# indexer is the same type as the array.
#
# This doesn't matter for int64, but DOES
# matter for smaller int dtypes.
#
# xref: https://github.com/numpy/numpy/issues/5370
try:
value = self.dtype.type(value)
except ValueError:
pass
return super().searchsorted(value, side=side, sorter=sorter)
def _ensure_frozen(array_like, categories, copy=False):
array_like = coerce_indexer_dtype(array_like, categories)
array_like = array_like.view(FrozenNDArray)
if copy:
array_like = array_like.copy()
return array_like
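# Note added for readability: the dtype coercion in FrozenNDArray.searchsorted
# avoids a slow numpy path when the searched value and the array hold different
# integer dtypes (see the numpy issue referenced in that method). A hypothetical
# illustration:
#
#     codes = np.zeros(1000000, dtype=np.int8)
#     codes.searchsorted(np.int8(0))   # value matches the array dtype: fast path
#     codes.searchsorted(0)            # plain int/int64 value: slower comparison path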
|
apache-2.0
|
hehongliang/tensorflow
|
tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py
|
5
|
15018
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN's estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.learn.python.learn.learn_io import graph_io
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.estimator import WarmStartSettings
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import training
from tensorflow.python.training import training_util
def generator_fn(noise_dict, mode):
del mode
noise = noise_dict['x']
return layers.fully_connected(noise, tensor_shape.dimension_value(
noise.shape[1]))
def discriminator_fn(data, unused_conditioning, mode):
del unused_conditioning, mode
return layers.fully_connected(data, 1)
class GetGANModelTest(test.TestCase, parameterized.TestCase):
"""Tests that `GetGANModel` produces the correct model."""
@parameterized.named_parameters(
('train', model_fn_lib.ModeKeys.TRAIN),
('eval', model_fn_lib.ModeKeys.EVAL),
('predict', model_fn_lib.ModeKeys.PREDICT))
def test_get_gan_model(self, mode):
with ops.Graph().as_default():
generator_inputs = {'x': array_ops.ones([3, 4])}
real_data = (array_ops.zeros([3, 4]) if
mode != model_fn_lib.ModeKeys.PREDICT else None)
gan_model = estimator._get_gan_model(
mode, generator_fn, discriminator_fn, real_data, generator_inputs,
add_summaries=False)
self.assertEqual(generator_inputs, gan_model.generator_inputs)
self.assertIsNotNone(gan_model.generated_data)
self.assertEqual(2, len(gan_model.generator_variables)) # 1 FC layer
self.assertIsNotNone(gan_model.generator_fn)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertIsNone(gan_model.real_data)
self.assertIsNone(gan_model.discriminator_real_outputs)
self.assertIsNone(gan_model.discriminator_gen_outputs)
self.assertIsNone(gan_model.discriminator_variables)
self.assertIsNone(gan_model.discriminator_scope)
self.assertIsNone(gan_model.discriminator_fn)
else:
self.assertIsNotNone(gan_model.real_data)
self.assertIsNotNone(gan_model.discriminator_real_outputs)
self.assertIsNotNone(gan_model.discriminator_gen_outputs)
self.assertEqual(2, len(gan_model.discriminator_variables)) # 1 FC layer
self.assertIsNotNone(gan_model.discriminator_scope)
self.assertIsNotNone(gan_model.discriminator_fn)
def get_dummy_gan_model():
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
with variable_scope.variable_scope('discriminator') as dis_scope:
dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
return tfgan_tuples.GANModel(
generator_inputs=None,
generated_data=array_ops.ones([3, 4]),
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
real_data=array_ops.zeros([3, 4]),
discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var,
discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
def dummy_loss_fn(gan_model, add_summaries=True):
return math_ops.reduce_sum(gan_model.discriminator_real_outputs -
gan_model.discriminator_gen_outputs)
def get_metrics(gan_model):
return {
'mse_custom_metric': metrics_lib.mean_squared_error(
gan_model.real_data, gan_model.generated_data)
}
class GetEstimatorSpecTest(test.TestCase, parameterized.TestCase):
"""Tests that the EstimatorSpec is constructed appropriately."""
@classmethod
def setUpClass(cls):
cls._generator_optimizer = training.GradientDescentOptimizer(1.0)
cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0)
@parameterized.named_parameters(
('train', model_fn_lib.ModeKeys.TRAIN),
('eval', model_fn_lib.ModeKeys.EVAL),
('predict', model_fn_lib.ModeKeys.PREDICT))
def test_get_estimator_spec(self, mode):
with ops.Graph().as_default():
self._gan_model = get_dummy_gan_model()
spec = estimator._get_estimator_spec(
mode,
self._gan_model,
generator_loss_fn=dummy_loss_fn,
discriminator_loss_fn=dummy_loss_fn,
get_eval_metric_ops_fn=get_metrics,
generator_optimizer=self._generator_optimizer,
discriminator_optimizer=self._discriminator_optimizer)
self.assertEqual(mode, spec.mode)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
elif mode == model_fn_lib.ModeKeys.TRAIN:
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.train_op)
self.assertIsNotNone(spec.training_hooks)
elif mode == model_fn_lib.ModeKeys.EVAL:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.eval_metric_ops)
# TODO(joelshor): Add pandas test.
class GANEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size,
lr_decay=False):
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.GANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
get_eval_metric_ops_fn=get_metrics,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],
scores['loss'])
self.assertIn('mse_custom_metric', six.iterkeys(scores))
# PREDICT
predictions = np.array([x for x in est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim])
def test_numpy_input_fn_lrdecay(self):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
batch_size = 5
data = np.zeros([batch_size, input_dim])
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[batch_size, input_dim],
lr_decay=True)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dim = 4
batch_size = 6
data = np.zeros([batch_size, input_dim])
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(
serialized_examples, feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
_, features = graph_io.queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
prediction_size=[batch_size, input_dim])
class GANEstimatorWarmStartTest(test.TestCase):
def setUp(self):
self._model_dir = self.get_temp_dir()
self.new_variable_name = 'new_var'
self.new_variable_value = [1, 2, 3]
def tearDown(self):
writer_cache.FileWriterCache.clear()
def _test_warm_start(self, warm_start_from=None):
"""Tests whether WarmStartSettings work as intended."""
def generator_with_new_variable(noise_dict, mode):
variable_scope.get_variable(name=self.new_variable_name,
initializer=self.new_variable_value,
trainable=True)
return generator_fn(noise_dict, mode)
def train_input_fn():
data = np.zeros([3, 4])
return {'x': data}, data
est = estimator.GANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
model_dir=self._model_dir)
est.train(train_input_fn, steps=1)
est_warm = estimator.GANEstimator(
generator_fn=generator_with_new_variable,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
model_dir=None if warm_start_from else self._model_dir,
warm_start_from=warm_start_from)
est_warm.train(train_input_fn, steps=1)
return est_warm
def test_warm_start_error(self):
"""Test if exception when reloading different estimators."""
with self.assertRaises(NotFoundError):
self._test_warm_start()
def test_warm_start_success(self):
"""Test if GANEstimator allows explicit warm start variable assignment."""
# Regex matches all variable names in ckpt except for new_var.
var_regex = '^(?!.*%s.*)' % self.new_variable_name
warmstart = WarmStartSettings(ckpt_to_initialize_from=self._model_dir,
vars_to_warm_start=var_regex)
est_warm = self._test_warm_start(warm_start_from=warmstart)
full_variable_name = 'Generator/%s' % self.new_variable_name
self.assertIn(full_variable_name, est_warm.get_variable_names())
equal_vals = np.array_equal(est_warm.get_variable_value(full_variable_name),
self.new_variable_value)
self.assertTrue(equal_vals)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
lmc2179/savvy
|
savvy/explainable_classifier.py
|
2
|
7304
|
"""
"""
import numpy as np
import copy
import random
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", context="talk")
class ExplainableClassifier(object):
def __init__(self, feature_names, model):
"""
Produce a wrapper around the model which allows explanations of particular classifications.
Model should define the following, similar to scikit-learn classes:
1. fit(X,y) method
2. predict_proba(X)
3. classes_ attribute
"""
self.model = model
self.feature_names = feature_names
self.samplers = None
    def fit(self, X, y):
        """Fit the wrapped model on X, y and record the observed feature
        values, which are later used to sample the input space."""
        for x in X:
            self._add_observations_to_sample(x)
        self.model.fit(X, y)
    def partial_fit(self, X, y):
        """Incrementally fit the wrapped model on X, y and record the observed
        feature values, which are later used to sample the input space."""
        for x in X:
            self._add_observations_to_sample(x)
        self.model.partial_fit(X, y)
def _add_observations_to_sample(self, x):
if self.samplers:
[sampler.observe(x_i) for sampler, x_i in zip(self.samplers, x)]
else: # Perform initial construction of samplers by attempting type inference
samplers = []
for x_i in x:
if isinstance(x_i, (int, float)) and not isinstance(x_i, bool):
sampler = _UniformRealSampler()
else:
sampler = _UniformCategoricalSampler()
sampler.observe(x_i)
samplers.append(sampler)
self.samplers = samplers
def __getattr__(self, item):
try:
return getattr(self.model, item)
except AttributeError:
            raise AttributeError(item)
def _sample_feature_contribution(self, feature, feature_permutations, input_sample, x):
substitute_features = feature_permutations[:feature_permutations.index(feature)]
perturbed_input_with_feature = self._substitute(x, input_sample, substitute_features + [feature])
perturbed_input_without_feature = self._substitute(x, input_sample, substitute_features)
predicted_probability_with_feature = self.model.predict_proba(perturbed_input_with_feature)
predicted_probability_without_feature = self.model.predict_proba(perturbed_input_without_feature)
feature_contribution_sample = predicted_probability_with_feature - predicted_probability_without_feature
return feature_contribution_sample
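    # Note added for readability: `_sample_feature_contribution` draws one Monte
    # Carlo sample of a feature's marginal contribution, Shapley-value style.
    # Features placed before `feature` in the shuffled permutation (plus, for the
    # "with" case, `feature` itself) keep the values of the point being explained;
    # the remaining features take values from the random `input_sample`. The
    # difference between the two predicted probability vectors is the sample.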
def _get_mean_of_samples(self, feature_contributions, number_of_samples):
normalized_feature_contributions = {}
for feature, contributions in feature_contributions.items():
contribution_vector = contributions * (1.0 / number_of_samples)
normalized_feature_contributions[feature] = {cls: contribution for cls, contribution in zip(self.classes_,
contribution_vector[0])}
return normalized_feature_contributions
def explain_classification(self, x, number_of_samples=1000):
"""Produce an explanation for the model's decision about the data point x. Returns an Explanation object,
which will indicate the importance of each feature for each possible class in the model's decision."""
feature_contributions = self._get_sum_of_sample_contributions(number_of_samples, x)
normalized_feature_contributions = self._get_mean_of_samples(feature_contributions, number_of_samples)
return Explanation(normalized_feature_contributions, x, self.model.predict_proba(x))
def _get_sum_of_sample_contributions(self, number_of_samples, x):
feature_contributions = {f: np.zeros((1, len(self.classes_))) for f in self.feature_names}
feature_permutations = copy.deepcopy(self.feature_names)
for i in range(number_of_samples):
random.shuffle(feature_permutations)
input_sample = self._sample_input_space()
for feature in self.feature_names:
feature_contribution_sample = self._sample_feature_contribution(feature, feature_permutations,
input_sample, x)
feature_contributions[feature] += feature_contribution_sample
return feature_contributions
def _sample_input_space(self):
return [sampler.sample() for sampler in self.samplers]
def _substitute(self, x, y, substituted_features):
return [x[i] if feature in substituted_features else y[i] for i, feature in enumerate(self.feature_names)]
class _UniformCategoricalSampler(object):
def __init__(self):
self.observed = set()
def observe(self, inp):
self.observed.add(inp)
def sample(self):
return random.choice(list(self.observed))
class _UniformRealSampler(object):
def __init__(self):
self.min = None
self.max = None
def observe(self, inp):
if self.min is None:
self.min = inp
self.max = inp
else:
self.min = min(self.min, inp)
self.max = max(self.max, inp)
def sample(self):
return random.uniform(self.min, self.max)
class Explanation(object):
"A plain old data object containing the explanation results."
def __init__(self, feature_contributions, inputs, probabilistic_prediction):
self.feature_names = np.array(list(feature_contributions.keys()))
self.class_names = list(feature_contributions[self.feature_names[0]].keys())
self.feature_contributions = feature_contributions
self.inputs = inputs
self.probabilistic_prediction = probabilistic_prediction
def get_contribution(self, feature, cls):
return self.feature_contributions[feature][cls]
def get_feature_contribution_vector(self, feature):
"""Returns vector where each row is a contribution of the feature toward a class. Classes are represented
in the same order as the class_names attribute."""
        return np.array([self.feature_contributions[feature][cls] for cls in self.class_names])
def get_class_contribution_vector(self, cls):
"""Returns vector where each row is a contribution of a feature toward the class. Features are represented in the
same order as the feature_names attribute.
"""
return np.array([self.feature_contributions[f][cls] for f in self.feature_names])
def BarPlot(explanation):
"Produce a number of barplots, one for each class."
#TODO: Hide all this unwrapping of arrays for axes
#TODO: Determination of y-axis dynamically?
feature_names = explanation.feature_names
x_axis = np.array(['{0}={1}'.format(f, str(explanation.inputs[i])) for i, f in enumerate(feature_names)])
class_names = explanation.class_names
f, *ax = plt.subplots(len(class_names), 1, figsize=(8, 6), sharex=True)
for axis, cls in zip(ax[0], class_names):
contribution_vector = explanation.get_class_contribution_vector(cls)
sns.barplot(x_axis, contribution_vector, ci=None, hline=0, ax=axis)
axis.set_ylabel('{0}'.format(cls))
sns.despine(bottom=True)
plt.setp(f.axes, yticks=[-1.0, -0.75,-0.5, 0.0, 0.5, 0.75, 1.0])
plt.tight_layout(h_pad=3)
plt.show()
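if __name__ == '__main__':
    # Illustrative usage sketch appended for readability (not part of the
    # original module). `_ConstantModel` is a hypothetical stand-in for any
    # object exposing the fit / predict_proba / classes_ contract described
    # in ExplainableClassifier.__init__.
    class _ConstantModel(object):
        classes_ = ['a', 'b']
        def fit(self, X, y):
            return self
        def predict_proba(self, x):
            return np.array([[0.5, 0.5]])
    clf = ExplainableClassifier(['f1', 'f2'], _ConstantModel())
    clf.fit([[0.0, 1.0], [1.0, 0.0]], ['a', 'b'])
    explanation = clf.explain_classification([0.2, 0.8], number_of_samples=10)
    # Prints 0.0 here, since the stand-in model ignores its input.
    print(explanation.get_contribution('f1', 'a'))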
|
mit
|
kevinthesun/mxnet
|
example/dec/dec.py
|
24
|
7846
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
def cluster_acc(Y_pred, Y):
from sklearn.utils.linear_assignment_ import linear_assignment
assert Y_pred.size == Y.size
D = max(Y_pred.max(), Y.max())+1
w = np.zeros((D,D), dtype=np.int64)
for i in range(Y_pred.size):
w[Y_pred[i], int(Y[i])] += 1
ind = linear_assignment(w.max() - w)
return sum([w[i,j] for i,j in ind])*1.0/Y_pred.size, w
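# Note added for readability: `cluster_acc` searches for the permutation of
# predicted cluster labels that best matches the ground truth (a linear
# assignment on the confusion matrix `w`) and returns the resulting accuracy
# together with `w`.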
class DECModel(model.MXModel):
class DECLoss(mx.operator.NumpyOp):
def __init__(self, num_centers, alpha):
super(DECModel.DECLoss, self).__init__(need_top_grad=False)
self.num_centers = num_centers
self.alpha = alpha
def forward(self, in_data, out_data):
z = in_data[0]
mu = in_data[1]
q = out_data[0]
self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
q[:] = self.mask**((self.alpha+1.0)/2.0)
q[:] = (q.T/q.sum(axis=1)).T
def backward(self, out_grad, in_data, out_data, in_grad):
q = out_data[0]
z = in_data[0]
mu = in_data[1]
p = in_data[2]
dz = in_grad[0]
dmu = in_grad[1]
self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
def infer_shape(self, in_shape):
assert len(in_shape) == 3
assert len(in_shape[0]) == 2
input_shape = in_shape[0]
label_shape = (input_shape[0], self.num_centers)
mu_shape = (self.num_centers, input_shape[1])
out_shape = (input_shape[0], self.num_centers)
return [input_shape, mu_shape, label_shape], [out_shape]
def list_arguments(self):
return ['data', 'mu', 'label']
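    # Note added for readability (following the DEC formulation this example
    # implements): DECLoss.forward computes the Student's-t soft assignment
    #     q_ij = (1 + ||z_i - mu_j||^2 / alpha) ** (-(alpha + 1) / 2),
    # normalised over j, and DECLoss.backward propagates the gradient of
    # KL(p || q) to both the embeddings z (dz) and the cluster centres mu (dmu),
    # where p is the sharpened target distribution built in `refresh` below.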
def setup(self, X, num_centers, alpha, save_to='dec_model'):
        sep = X.shape[0]*9//10
X_train = X[:sep]
X_val = X[sep:]
ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
if not os.path.exists(save_to+'_pt.arg'):
ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.save(save_to+'_pt.arg')
logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
else:
ae_model.load(save_to+'_pt.arg')
self.ae_model = ae_model
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
def cluster(self, X, y=None, update_interval=None):
N = X.shape[0]
if not update_interval:
update_interval = N
batch_size = 256
test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
kmeans = KMeans(self.num_centers, n_init=20)
kmeans.fit(z)
args['dec_mu'][:] = kmeans.cluster_centers_
solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
shuffle=False, last_batch_handle='roll_over')
self.y_pred = np.zeros((X.shape[0]))
def refresh(i):
if i%update_interval == 0:
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
p = np.zeros((z.shape[0], self.num_centers))
self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
y_pred = p.argmax(axis=1)
print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
                if y is not None:
                    print(np.std(np.bincount(y.astype(np.int))), np.bincount(y.astype(np.int)))
                    print(cluster_acc(y_pred, y)[0])
weight = 1.0/p.sum(axis=0)
weight *= self.num_centers/weight.sum()
p = (p**2)*weight
train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
print(np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0])
if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
self.y_pred = y_pred
return True
self.y_pred = y_pred
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(50))
solver.solve(self.xpu, self.loss, args, self.args_grad, None,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
if y is not None:
return cluster_acc(self.y_pred, y)[0]
else:
return -1
def mnist_exp(xpu):
X, Y = data.get_mnist()
dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
acc = []
for i in [10*(2**j) for j in range(9)]:
acc.append(dec_model.cluster(X, Y, i))
logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
logging.info(str(acc))
logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
mnist_exp(mx.gpu(0))
|
apache-2.0
|
ant9000/RIOT
|
tests/pkg_cmsis-nn/generate_image.py
|
15
|
1140
|
#!/usr/bin/env python3
"""Generate a binary file from a sample image of the CIFAR-10 dataset.
Pixel of the sample are stored as uint8, images have size 32x32x3.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
_, (cifar10_test, _) = cifar10.load_data()
data = cifar10_test[args.index]
data = data.astype('uint8')
output_path = os.path.join(SCRIPT_DIR, args.output)
np.ndarray.tofile(data, output_path)
if args.no_plot is False:
plt.imshow(data)
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in CIFAR test dataset")
parser.add_argument("-o", "--output", type=str, default='input',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
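# Example invocation (a usage note, not part of the original script):
#
#     python3 generate_image.py --index 42 --output input --no-plot
#
# This writes the 32x32x3 uint8 pixels of CIFAR-10 test image #42 to a raw
# binary file named `input` next to this script.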
|
lgpl-2.1
|
redarmy30/Eurobot-2017
|
old year/RESET-master/PyRPLidar/rplidar.py
|
2
|
8549
|
"""
RPLidar Python Driver
...
...
"""
import serial
import logging
import Queue
from collections import deque
import numpy as np
import matplotlib.pyplot as plt
from rplidar_monitor import *
class RPLidar(object):
def __init__(self, portname, baudrate=115200, timeout=1):
# init serial port
self.serial_port = None
self.portname = portname
self.baudrate = baudrate
self.timeout = timeout
self.serial_arg = dict(port=portname,
baudrate=baudrate,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_NONE,
timeout=timeout)
# status variables
self.isConnected = False
self.motorRunning = None
# init monitor
self.monitor = None
# data containers
self.raw_points = Queue.Queue()
self.raw_frames = Queue.Queue()
self.current_frame = RPLidarFrame()
def connect(self):
if not self.isConnected:
try:
self.serial_port = serial.Serial(**self.serial_arg)
self.isConnected = True
logging.debug("Connected to RPLidar on port %s", self.portname)
self.stop_motor()
except serial.SerialException as e:
logging.error(e.message)
def disconnect(self):
if self.isConnected:
try:
if self.monitor:
self.stop_monitor()
self.serial_port.close()
self.isConnected = False
logging.debug("Disconnected from RPLidar on port %s", self.portname)
except serial.SerialException as e:
logging.error(e.message)
def reset(self):
self.send_command(RPLIDAR_CMD_RESET)
logging.debug("Command RESET sent.")
time.sleep(0.1)
def start_motor(self):
"""Start RPLidar motor by setting DTR (which is connected to pin MOTOCTL
on RPLidar) to False."""
self.serial_port.setDTR(False)
self.motorRunning = True
logging.debug("RPLidar motor is turned ON.")
def stop_motor(self):
"""Stop RPLidar motor by setting DTR to True."""
self.serial_port.setDTR(True)
self.motorRunning = False
logging.debug("RPLidar motor is turned OFF.")
def send_command(self, command):
"""Send command to RPLidar through the serial connection"""
cmd_bytes = rplidar_command_format.build(Container(
sync_byte=RPLIDAR_CMD_SYNC_BYTE, cmd_flag=command))
self.serial_port.write(cmd_bytes)
logging.debug("Command %s sent.", toHex(cmd_bytes))
def response_header(self, timeout=1):
"""Read response header from RPLidar through the serial connection"""
start_time = time.time()
while time.time() < start_time + timeout:
if self.serial_port.inWaiting() < rplidar_response_header_format.sizeof():
#logging.debug(serial_port.inWaiting())
time.sleep(0.01)
else:
raw = self.serial_port.read(rplidar_response_header_format.sizeof())
parsed = rplidar_response_header_format.parse(raw)
#logging.debug(parsed)
if ((parsed.sync_byte1 != RPLIDAR_ANS_SYNC_BYTE1) or
(parsed.sync_byte2 != RPLIDAR_ANS_SYNC_BYTE2)):
raise RPLidarError("RESULT_INVALID_ANS_HEADER")
else:
return parsed.response_type
raise RPLidarError("RESULT_READING_TIMEOUT")
def get_device_info(self):
"""Obtain hardware information about RPLidar"""
self.serial_port.flushInput()
self.send_command(RPLIDAR_CMD_GET_DEVICE_INFO)
if self.response_header() == RPLIDAR_ANS_TYPE_DEVINFO:
raw = self.serial_port.read(rplidar_response_device_info_format.sizeof())
parsed = rplidar_response_device_info_format.parse(raw)
return {"model": parsed.model,
"firmware_version_major": parsed.firmware_version_major,
"firmware_version_minor": parsed.firmware_version_minor,
"hardware_version": parsed.hardware_version,
"serial_number": toHex(parsed.serial_number)}
else:
raise RPLidarError("RESULT_INVALID_ANS_TYPE")
def get_health(self):
"""Obtain health information about RPLidar"""
self.serial_port.flushInput()
self.send_command(RPLIDAR_CMD_GET_DEVICE_HEALTH)
if self.response_header() == RPLIDAR_ANS_TYPE_DEVHEALTH:
raw = self.serial_port.read(rplidar_response_device_health_format.sizeof())
parsed = rplidar_response_device_health_format.parse(raw)
return {"status": parsed.status,
"error_code": parsed.error_code}
else:
raise RPLidarError("RESULT_INVALID_ANS_TYPE")
def start_monitor(self, archive=False):
""" Start the monitor thread """
if self.monitor is None:
logging.debug("Try to start monitor thread.")
self.monitor = RPLidarMonitor(self, archive=archive)
self.monitor.start()
def stop_monitor(self):
""" Stop the monitor """
if self.monitor is not None:
logging.debug("Try to stop monitor thread.")
self.monitor.join()
self.monitor = None
def init_xy_plot(self):
""" setup an XY plot canvas """
plt.ion()
self.figure = plt.figure(figsize=(6, 6),
dpi=160,
facecolor="w",
edgecolor="k")
self.ax = self.figure.add_subplot(111)
self.lines, = self.ax.plot([],[],
linestyle="none",
marker=".",
markersize=3,
markerfacecolor="blue")
self.ax.set_xlim(-1000, 1000)
self.ax.set_ylim(-1000, 1000)
self.ax.grid()
def update_xy_plot(self):
""" re-draw the XY plot with new current_frame """
self.lines.set_xdata(self.current_frame.x)
self.lines.set_ydata(self.current_frame.y)
self.figure.canvas.draw()
def init_polar_plot(self):
""" setup a polar plot canvas """
plt.ion()
self.figure = plt.figure(figsize=(6, 6),
dpi=160,
facecolor="w",
edgecolor="k")
self.ax = self.figure.add_subplot(111, polar=True)
self.lines, = self.ax.plot([],[],
linestyle="none",
marker=".",
markersize=3,
markerfacecolor="blue")
self.ax.set_rmax(5000)
self.ax.set_theta_direction(-1) #set to clockwise
self.ax.set_theta_offset(np.pi/2) #offset by 90 degree so that 0 degree is at 12 o'clock
#self.ax.grid()
def update_polar_plot(self):
""" re-draw the polar plot with new current_frame """
self.lines.set_xdata(self.current_frame.angle_r)
self.lines.set_ydata(self.current_frame.distance)
self.figure.canvas.draw()
if __name__ == "__main__":
# logging config
logging.basicConfig(level=logging.DEBUG,
format="[%(levelname)s] (%(threadName)-10s) %(message)s")
rplidar = RPLidar("/dev/ttyUSB0")
rplidar.connect()
print rplidar.get_device_info()
print rplidar.get_health()
rplidar.start_monitor(archive=False) #can put False and it won't archive
#rplidar.init_polar_plot()
rplidar.init_xy_plot()
try:
while True:
#rplidar.update_polar_plot()
rplidar.update_xy_plot()
time.sleep(0.15)
pass
except KeyboardInterrupt:
logging.debug("CTRL-c pressed, exiting...")
pass
rplidar.stop_monitor()
rplidar.disconnect()
rplidar = None
|
mit
|
Phyks/replot
|
replot/helpers/plot.py
|
1
|
1557
|
"""
Various helper functions for plotting.
"""
import numpy as np
from replot import adaptive_sampling
from replot import exceptions as exc
def plot_function(data, *args, **kwargs):
"""
Helper function to handle plotting of unevaluated functions (trying \
to evaluate it nicely and rendering the plot).
:param data: The function to plot.
:returns: A tuple of ``(args, kwargs)`` representing the plot.
.. seealso:: The documentation of the ``replot.Figure.plot`` method.
    .. note:: ``args`` is used to handle the interval or point series on \
            which the function should be evaluated. ``kwargs`` are passed \
            directly to ``matplotlib.pyplot.plot``.
"""
if len(args) == 0:
# If no interval specified, raise an issue
raise exc.InvalidParameterError(
"You should pass a plotting interval to the plot command.")
elif isinstance(args[0], tuple):
# Interval specified, use it and adaptive plotting
x_values, y_values = adaptive_sampling.sample_function(
data,
args[0],
tol=1e-3)
elif isinstance(args[0], (list, np.ndarray)):
# List of points specified, use them and compute values of the
# function
x_values = args[0]
y_values = [data(i) for i in x_values]
else:
raise exc.InvalidParameterError(
"Second parameter in plot command should be a tuple " +
"specifying plotting interval.")
return ((x_values, y_values) + args[1:], kwargs)
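# Illustrative usage sketch (not part of the original module): the two call
# forms accepted by `plot_function`, using `math.sin` as an example function.
#
#     import math
#     args, kwargs = plot_function(math.sin, (0, 2 * math.pi), color="red")
#     args, kwargs = plot_function(math.sin, [0.0, 0.5, 1.0], color="red")
#
# In both cases `args` starts with the x and y series, ready to be forwarded
# to `matplotlib.pyplot.plot` together with `kwargs`.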
|
mit
|
Autoplectic/dit
|
dit/profiles/entropy_triangle.py
|
1
|
5520
|
"""
The entropy triangle, from [Valverde-Albacete, Francisco Jose, and Carmen
Pelaez-Moreno. "The Multivariate Entropy Triangle and Applications." Hybrid
Artificial Intelligent Systems. Springer International Publishing, 2016.
647-658].
"""
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from ..distribution import BaseDistribution
from ..distconst import product_distribution, uniform_like
from ..multivariate import (entropy, residual_entropy, dual_total_correlation,
total_correlation)
__all__ = [
'EntropyTriangle',
'EntropyTriangle2',
]
class BaseEntropyTriangle(with_metaclass(ABCMeta, object)):
"""
BaseEntropyTriangle
Static Attributes
-----------------
left_label : str
        The label for the left axis when plotting.
right_label : str
The label for the right axis when plotting.
bottom_label : str
The label for the bottom axis when plotting.
Attributes
----------
dists : [Distribution]
points : list of tuples
Methods
-------
draw
Plot the entropy triangle.
"""
left_label = r"$\operatorname{R}[\mathrm{dist}]$"
right_label = r"$\operatorname{T}[\mathrm{dist}] + \operatorname{B}[\mathrm{dist}]$"
bottom_label = r"$\Delta \operatorname{H}_{\Pi_\overline{X}}$"
def __init__(self, dists):
"""
Initialize the entropy triangle.
Parameters
----------
dists : [Distribution] or Distribution
The list of distributions to plot on the entropy triangle. If a
single distribution is provided, it alone will be computed.
"""
if isinstance(dists, BaseDistribution):
self.dists = [dists]
else:
self.dists = dists
self.points = [self._compute_point(dist) for dist in self.dists]
@staticmethod
@abstractmethod
def _compute_point(dist):
"""
        Compute the three normalized axis values.
Parameters
----------
dist : Distribution
The distribution to compute values for.
"""
pass
def draw(self, ax=None, setup=True, marker='o', color='k'): # pragma: no cover
"""
Plot the entropy triangle.
Parameters
----------
ax : Axis or None
The matplotlib axis to plot on. If none is provided, one will be
constructed.
setup : bool
If true, labels, tick marks, gridlines, and a boundary will be added
to the plot. Defaults to True.
marker : str
The matplotlib marker shape to use.
color : str
The color of marker to use.
"""
import ternary
if ax is None:
fig, ax = ternary.figure()
fig.set_size_inches(10, 8)
else:
ax = ternary.TernaryAxesSubplot(ax=ax)
if setup:
ax.boundary()
ax.gridlines(multiple=0.1)
fontsize = 20
ax.set_title("Entropy Triangle", fontsize=fontsize)
ax.left_axis_label(self.left_label, fontsize=fontsize)
ax.right_axis_label(self.right_label, fontsize=fontsize)
ax.bottom_axis_label(self.bottom_label, fontsize=fontsize)
ax.ticks(axis='lbr', multiple=0.1, linewidth=1)
ax.clear_matplotlib_ticks()
ax.scatter(self.points, marker=marker, color=color)
ax._redraw_labels()
return ax
class EntropyTriangle(BaseEntropyTriangle):
"""
Construct the Multivariate Entropy Triangle, as defined in
[Valverde-Albacete, Francisco Jose, and Carmen Pelaez-Moreno. "The
Multivariate Entropy Triangle and Applications." Hybrid Artificial
Intelligent Systems. Springer International Publishing, 2016. 647-658]
"""
left_label = r"$\operatorname{R}[\mathrm{dist}]$"
right_label = r"$\operatorname{T}[\mathrm{dist}] + \operatorname{B}[\mathrm{dist}]$"
bottom_label = r"$\Delta \operatorname{H}_{\Pi_\overline{X}}$"
@staticmethod
def _compute_point(dist):
"""
Compute the deviation from uniformity, dependence, and independence of a
distribution.
Parameters
----------
dist : Distribution
The distribution to compute values for.
"""
H_U = entropy(uniform_like(dist))
H_P = entropy(product_distribution(dist))
Delta = H_U - H_P
VI = residual_entropy(dist)
M = H_P - VI
return (Delta/H_U, M/H_U, VI/H_U)
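# Summary added for readability (notation follows the reference cited in the
# module docstring): with H_U the entropy of the uniform distribution on the
# same support and H_P the entropy of the product of the marginals,
#     Delta = H_U - H_P   (divergence from uniformity)
#     VI    = residual entropy (non-shared information)
#     M     = H_P - VI    (shared information)
# so the plotted point (Delta, M, VI) / H_U has coordinates summing to one.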
class EntropyTriangle2(BaseEntropyTriangle):
"""
Construct a variation on the Entropy Triangle, comparing the amount of
independence in the distribution (residual entropy) to two types of
dependence (total correlation and dual total correlation).
"""
left_label = r"$\operatorname{B}[\mathrm{dist}]$"
right_label = r"$\operatorname{T}[\mathrm{dist}]$"
bottom_label = r"$\operatorname{R}[\mathrm{dist}]$"
@staticmethod
def _compute_point(dist):
"""
Compute the residual entropy, total correlation, and dual total
correlation for the distribution, and normalize them.
Parameters
----------
dist : Distribution
The distribution to compute values for.
"""
R = residual_entropy(dist)
B = dual_total_correlation(dist)
T = total_correlation(dist)
total = R + B + T
return (R/total, T/total, B/total)
|
bsd-3-clause
|
fredhusser/scikit-learn
|
benchmarks/bench_plot_fastkmeans.py
|
294
|
4676
|
from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
|
bsd-3-clause
|
peterbarker/ardupilot
|
libraries/AP_Math/tools/geodesic_grid/plot.py
|
110
|
2876
|
# Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import icosahedron as ico
import grid
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(-2, 2)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
added_polygons = set()
added_sections = set()
def polygons(polygons):
for p in polygons:
polygon(p)
def polygon(polygon):
added_polygons.add(polygon)
def section(s):
added_sections.add(s)
def sections(sections):
for s in sections:
section(s)
def show(subtriangles=False):
polygons = []
facecolors = []
triangles_indexes = set()
subtriangle_facecolors = (
'#CCCCCC',
'#CCE5FF',
'#E5FFCC',
'#FFCCCC',
)
if added_sections:
subtriangles = True
for p in added_polygons:
try:
i = ico.triangles.index(p)
except ValueError:
polygons.append(p)
continue
if subtriangles:
sections(range(i * 4, i * 4 + 4))
else:
triangles_indexes.add(i)
polygons.append(p)
facecolors.append('#DDDDDD')
for s in added_sections:
triangles_indexes.add(int(s / 4))
subtriangle_index = s % 4
polygons.append(grid.section_triangle(s))
facecolors.append(subtriangle_facecolors[subtriangle_index])
ax.add_collection3d(Poly3DCollection(
polygons,
facecolors=facecolors,
edgecolors="#777777",
))
for i in triangles_indexes:
t = ico.triangles[i]
mx = my = mz = 0
for x, y, z in t:
mx += x
my += y
mz += z
ax.text(mx / 2.6, my / 2.6, mz / 2.6, i, color='#444444')
if subtriangles:
ax.legend(
handles=tuple(
mpatches.Patch(color=c, label='Sub-triangle #%d' % i)
for i, c in enumerate(subtriangle_facecolors)
),
)
plt.show()
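# Example usage (a note, not part of the original tool): highlight icosahedron
# triangle 0 together with its four sub-triangles and display the figure:
#
#     polygon(ico.triangles[0])
#     show(subtriangles=True)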
|
gpl-3.0
|
marcocaccin/scikit-learn
|
examples/model_selection/plot_learning_curve.py
|
250
|
4171
|
"""
========================
Plotting Learning Curves
========================
On the left side the learning curve of a naive Bayes classifier is shown for
the digits dataset. Note that the training score and the cross-validation score
are both not very good at the end. However, the shape of the curve can be found
in more complex datasets very often: the training score is very high at the
beginning and decreases and the cross-validation score is very low at the
beginning and increases. On the right side we see the learning curve of an SVM
with RBF kernel. We can see clearly that the training score is still around
the maximum and the validation score could be increased with more training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.learning_curve import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
    Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum y-values plotted.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
digits = load_digits()
X, y = digits.data, digits.target
title = "Learning Curves (Naive Bayes)"
# Cross validation with 100 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=100,
test_size=0.2, random_state=0)
estimator = GaussianNB()
plot_learning_curve(estimator, title, X, y, ylim=(0.7, 1.01), cv=cv, n_jobs=4)
title = "Learning Curves (SVM, RBF kernel, $\gamma=0.001$)"
# SVC is more expensive so we do a lower number of CV iterations:
cv = cross_validation.ShuffleSplit(digits.data.shape[0], n_iter=10,
test_size=0.2, random_state=0)
estimator = SVC(gamma=0.001)
plot_learning_curve(estimator, title, X, y, (0.7, 1.01), cv=cv, n_jobs=4)
plt.show()
|
bsd-3-clause
|
Tjorriemorrie/trading
|
02_fractals/fractals.py
|
1
|
1455
|
from __future__ import division
from random import shuffle
from matplotlib import pyplot
class FractalFactory():
graph = set()
def make_graph(self, diepte, start, end, turns):
# add points to graph
self.graph.add(start)
self.graph.add(end)
if diepte > 0:
# unpack input values
fromtime, fromvalue = start
totime, tovalue = end
            # calculate differences between points
diffs = []
last_time, last_val = fromtime, fromvalue
for t, v in turns:
new_time = fromtime + (totime - fromtime) * t
new_val = fromvalue + (tovalue - fromvalue) * v
diffs.append((new_time - last_time, new_val - last_val))
last_time, last_val = new_time, new_val
# add 'brownian motion' by reordering the segments
# shuffle(diffs)
# calculate actual intermediate points and recurse
last = start
for segment in diffs:
p = last[0] + segment[0], last[1] + segment[1]
self.make_graph(diepte - 1, last, p, turns)
last = p
self.make_graph(diepte - 1, last, end, turns)
if __name__ == '__main__':
depth = 1
ff = FractalFactory()
ff.make_graph(depth, (0, 0), (1, 1), [(1/9, 2/3), (5/9, 1/3)])
graph = ff.graph
pyplot.plot(*zip(*sorted(graph)))
pyplot.show()
|
mit
|
terkkila/scikit-learn
|
sklearn/decomposition/dict_learning.py
|
83
|
44062
|
""" Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
                         % algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
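# Illustrative usage sketch (added for clarity; not part of the original
# module). A minimal, hedged example of calling ``sparse_encode`` against a
# random, row-normalized dictionary; the shapes and parameters below are
# assumptions chosen only for illustration.
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> D = rng.randn(15, 30)                              # (n_components, n_features)
# >>> D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]  # unit-norm rows
# >>> X = rng.randn(10, 30)                              # (n_samples, n_features)
# >>> code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=3)
# >>> code.shape
# (10, 15)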
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
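# Illustrative usage sketch (added for clarity; not part of the original
# module). A hedged, minimal call to ``dict_learning`` on random data; the
# sizes, alpha and max_iter are assumptions chosen only to keep the example
# small.
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> X = rng.randn(20, 10)                              # (n_samples, n_features)
# >>> code, dictionary, errors = dict_learning(X, n_components=5, alpha=1.,
# ...                                          max_iter=20, random_state=0)
# >>> code.shape, dictionary.shape
# ((20, 5), (5, 10))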
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.' % method)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, the number of iterations returned should be zero
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
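# Illustrative usage sketch (added for clarity; not part of the original
# module). A hedged example of the online solver on random mini-batches; the
# parameter values are assumptions chosen only for illustration.
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> X = rng.randn(50, 8)
# >>> code, dictionary = dict_learning_online(X, n_components=4, alpha=1.,
# ...                                         n_iter=25, batch_size=5,
# ...                                         random_state=0)
# >>> code.shape, dictionary.shape
# ((50, 4), (4, 8))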
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
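# Illustrative usage sketch (added for clarity; not part of the original
# module). ``SparseCoder`` encodes data against a fixed dictionary; the random
# unit-norm dictionary below is an assumption chosen only for illustration.
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> D = rng.randn(12, 20)
# >>> D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]
# >>> coder = SparseCoder(D, transform_algorithm='lasso_lars',
# ...                     transform_alpha=0.1)
# >>> coder.transform(rng.randn(5, 20)).shape
# (5, 12)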
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
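# Illustrative usage sketch (added for clarity; not part of the original
# module). Fitting ``DictionaryLearning`` on random data; the hyper-parameters
# are assumptions chosen only to keep the example fast.
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> X = rng.randn(30, 10)
# >>> dico = DictionaryLearning(n_components=6, alpha=1., max_iter=10,
# ...                           random_state=0).fit(X)
# >>> dico.components_.shape
# (6, 10)
# >>> dico.transform(X[:3]).shape
# (3, 6)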
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
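# Illustrative usage sketch (added for clarity; not part of the original
# module). Online fitting with ``partial_fit`` over successive mini-batches;
# the sizes and hyper-parameters are assumptions chosen only for illustration.
# >>> import numpy as np
# >>> rng = np.random.RandomState(0)
# >>> mbdl = MiniBatchDictionaryLearning(n_components=5, alpha=1., n_iter=5,
# ...                                    batch_size=10, random_state=0)
# >>> for _ in range(3):
# ...     mbdl = mbdl.partial_fit(rng.randn(10, 8))
# >>> mbdl.components_.shape
# (5, 8)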
|
bsd-3-clause
|
xubenben/scikit-learn
|
sklearn/linear_model/__init__.py
|
270
|
3096
|
"""
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
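# Illustrative usage sketch (added for clarity; not part of the original
# module). A hedged example of importing one of the estimators exposed by this
# package and fitting it on toy data.
# >>> import numpy as np
# >>> from sklearn.linear_model import LinearRegression
# >>> X = np.array([[0.], [1.], [2.]])
# >>> y = np.array([0., 1., 2.])
# >>> reg = LinearRegression().fit(X, y)
# >>> round(float(reg.coef_[0]), 3)
# 1.0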
|
bsd-3-clause
|
gfyoung/pandas
|
pandas/tests/indexes/categorical/test_equals.py
|
2
|
3033
|
import numpy as np
import pytest
from pandas import Categorical, CategoricalIndex, Index
class TestEquals:
def test_equals_categorical(self):
ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)
ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True)
assert ci1.equals(ci1)
assert not ci1.equals(ci2)
assert ci1.equals(ci1.astype(object))
assert ci1.astype(object).equals(ci1)
assert (ci1 == ci1).all()
assert not (ci1 != ci1).all()
assert not (ci1 > ci1).all()
assert not (ci1 < ci1).all()
assert (ci1 <= ci1).all()
assert (ci1 >= ci1).all()
assert not (ci1 == 1).all()
assert (ci1 == Index(["a", "b"])).all()
assert (ci1 == ci1.values).all()
# invalid comparisons
with pytest.raises(ValueError, match="Lengths must match"):
ci1 == Index(["a", "b", "c"])
msg = "Categoricals can only be compared if 'categories' are the same"
with pytest.raises(TypeError, match=msg):
ci1 == ci2
with pytest.raises(TypeError, match=msg):
ci1 == Categorical(ci1.values, ordered=False)
with pytest.raises(TypeError, match=msg):
ci1 == Categorical(ci1.values, categories=list("abc"))
# tests
# make sure that we are testing for category inclusion properly
ci = CategoricalIndex(list("aabca"), categories=["c", "a", "b"])
assert not ci.equals(list("aabca"))
# Same categories, but different order
# Unordered
assert ci.equals(CategoricalIndex(list("aabca")))
# Ordered
assert not ci.equals(CategoricalIndex(list("aabca"), ordered=True))
assert ci.equals(ci.copy())
ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"])
assert not ci.equals(list("aabca"))
assert not ci.equals(CategoricalIndex(list("aabca")))
assert ci.equals(ci.copy())
ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"])
assert not ci.equals(list("aabca") + [np.nan])
assert ci.equals(CategoricalIndex(list("aabca") + [np.nan]))
assert not ci.equals(CategoricalIndex(list("aabca") + [np.nan], ordered=True))
assert ci.equals(ci.copy())
def test_equals_categorical_unordered(self):
# https://github.com/pandas-dev/pandas/issues/16603
a = CategoricalIndex(["A"], categories=["A", "B"])
b = CategoricalIndex(["A"], categories=["B", "A"])
c = CategoricalIndex(["C"], categories=["B", "A"])
assert a.equals(b)
assert not a.equals(c)
assert not b.equals(c)
def test_equals_non_category(self):
# GH#37667 Case where other contains a value not among ci's
# categories ("D") and also contains np.nan
ci = CategoricalIndex(["A", "B", np.nan, np.nan])
other = Index(["A", "B", "D", np.nan])
assert not ci.equals(other)
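# Illustrative usage sketch (added for clarity; not part of the original test
# module). A hedged example of the equality semantics exercised above:
# ``equals`` ignores category order for unordered categoricals but is
# sensitive to the ``ordered`` flag itself.
# >>> import pandas as pd
# >>> a = pd.CategoricalIndex(["a", "b"], categories=["a", "b"])
# >>> b = pd.CategoricalIndex(["a", "b"], categories=["b", "a"])
# >>> a.equals(b)
# True
# >>> a.equals(pd.CategoricalIndex(["a", "b"], ordered=True))
# False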
|
bsd-3-clause
|
ywcui1990/nupic.research
|
projects/union_pooling/experiments/tp_learning/tp_trained_tm_backwardLearning.py
|
8
|
23013
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import sys
import time
import os
import yaml
from optparse import OptionParser
import numpy
from pylab import rcParams
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
plt.ion()
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
from nupic.algorithms.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from htmresearch.frameworks.union_temporal_pooling.union_temporal_pooler_experiment import (
UnionTemporalPoolerExperiment)
_SHOW_PROGRESS_INTERVAL = 200
"""
Experiment 1
Runs UnionTemporalPooler on input from a Temporal Memory after training
on a long sequence
Enables learning in UnionTemporalPooler
Tests different learning rules
- Forward learning is Hebbian learning on union pooled cells
- Backward learning is Reinforcement-like learning that allows cells to
connect to inputs from the previous few time steps
- Several metrics are measured before and after learning, including
average response latency, total size of union, overlap between learned &
naive representations
"""
ncol = 1024
learnType = 'ForwardBackward'
learningPasses = 100
paramDir = 'params/5_trainingPasses_'+str(ncol)+'_columns_'+learnType+'.yaml'
outputDir = 'results/'+str(ncol)+learnType+'/'
if not os.path.exists(outputDir):
os.makedirs(outputDir)
params = yaml.safe_load(open(paramDir, 'r'))
options = {'plotVerbosity': 2, 'consoleVerbosity': 2}
plotVerbosity = 2
consoleVerbosity = 1
print "Running SDR overlap experiment...\n"
print "Params dir: {0}".format(paramDir)
print "Output dir: {0}\n".format(outputDir)
# Dimensionality of sequence patterns
patternDimensionality = params["patternDimensionality"]
# Cardinality (ON / true bits) of sequence patterns
patternCardinality = params["patternCardinality"]
# Length of sequences shown to network
sequenceLength = params["sequenceLength"]
# Number of sequences used. Sequences may share common elements.
numberOfSequences = params["numberOfSequences"]
# Number of sequence passes for training the TM. Zero => no training.
trainingPasses = params["trainingPasses"]
# Generate a sequence list and an associated labeled list (both containing a
# set of sequences separated by None)
print "\nGenerating sequences..."
patternAlphabetSize = sequenceLength * numberOfSequences
patternMachine = PatternMachine(patternDimensionality, patternCardinality,
patternAlphabetSize)
sequenceMachine = SequenceMachine(patternMachine)
numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
generatedSequences = sequenceMachine.generateFromNumbers(numbers)
sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
for i in xrange(numberOfSequences)]
labeledSequences = []
for label in sequenceLabels:
for _ in xrange(sequenceLength):
labeledSequences.append(label)
labeledSequences.append(None)
def initializeNetwork():
tmParamOverrides = params["temporalMemoryParams"]
upParamOverrides = params["unionPoolerParams"]
# Set up the Temporal Memory and Union Pooler network
print "\nCreating network..."
experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides)
return experiment
def runTMtrainingPhase(experiment):
# Train only the Temporal Memory on the generated sequences
if trainingPasses > 0:
print "\nTraining Temporal Memory..."
if consoleVerbosity > 0:
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
for i in xrange(trainingPasses):
experiment.runNetworkOnSequences(generatedSequences,
labeledSequences,
tmLearn=True,
upLearn=None,
verbosity=consoleVerbosity,
progressInterval=_SHOW_PROGRESS_INTERVAL)
if consoleVerbosity > 0:
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
# Reset the TM monitor mixin's records accrued during this training pass
# experiment.tm.mmClearHistory()
print
print MonitorMixinBase.mmPrettyPrintMetrics(
experiment.tm.mmGetDefaultMetrics())
print
def SDRsimilarity(SDR1, SDR2):
return float(len(SDR1 & SDR2)) / float(max(len(SDR1), len(SDR2) ))
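# Illustrative example (added for clarity; not part of the original script):
# the similarity is the overlap of two SDRs, |A & B| / max(|A|, |B|), e.g.
# >>> SDRsimilarity({1, 2, 3, 4}, {3, 4, 5})
# 0.5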
def getUnionSDRSimilarityCurve(activeCellsTrace, trainingPasses, sequenceLength, maxSeparation, skipBeginningElements=0):
similarityVsSeparation = numpy.zeros((trainingPasses, maxSeparation))
for rpts in xrange(trainingPasses):
for sep in xrange(maxSeparation):
similarity = []
for i in xrange(rpts*sequenceLength+skipBeginningElements, rpts*sequenceLength+sequenceLength-sep):
similarity.append(SDRsimilarity(activeCellsTrace[i], activeCellsTrace[i+sep]))
similarityVsSeparation[rpts, sep] = numpy.mean(similarity)
return similarityVsSeparation
def plotSDRsimilarityVsTemporalSeparation(similarityVsSeparationBefore, similarityVsSeparationAfter):
# plot SDR similarity as a function of temporal separation
f, (axs) = plt.subplots(nrows=2,ncols=2)
rpt = 0
ax1 = axs[0,0]
ax1.plot(similarityVsSeparationBefore[rpt,:],label='Before')
ax1.plot(similarityVsSeparationAfter[rpt,:],label='After')
ax1.set_xlabel('Separation in time between SDRs')
ax1.set_ylabel('SDRs overlap')
# ax1.set_title('Initial Cycle')
ax1.set_ylim([0,1])
ax1.legend(loc='upper right')
# rpt=4
# ax2.plot(similarityVsSeparationBefore[rpt,:],label='Before')
# ax2.plot(similarityVsSeparationAfter[rpt,:],label='After')
# ax2.set_xlabel('Separation in time between SDRs')
# ax2.set_ylabel('SDRs overlap')
# ax2.set_title('Last Cycle')
# ax2.set_ylim([0,1])
# ax2.legend(loc='upper right')
f.savefig(outputDir+'UnionSDRoverlapVsTemporalSeparation.eps',format='eps')
def plotSimilarityMatrix(similarityMatrixBefore, similarityMatrixAfter):
f, (ax1, ax2) = plt.subplots(nrows=1,ncols=2)
im = ax1.imshow(similarityMatrixBefore[0:sequenceLength, 0:sequenceLength],interpolation="nearest")
ax1.set_xlabel('Time (steps)')
ax1.set_ylabel('Time (steps)')
ax1.set_title('Overlap - Before Learning')
im = ax2.imshow(similarityMatrixAfter[0:sequenceLength, 0:sequenceLength],interpolation="nearest")
ax2.set_xlabel('Time (steps)')
ax2.set_ylabel('Time (steps)')
ax2.set_title('Overlap - After Learning')
# cax,kw = mpl.colorbar.make_axes([ax1, ax2])
# plt.colorbar(im, cax=cax, **kw)
# plt.tight_layout()
f.savefig(outputDir+'/UnionSDRoverlapBeforeVsAfterLearning.eps',format='eps')
def calculateSimilarityMatrix(activeCellsTraceBefore, activeCellsTraceAfter):
nSteps = sequenceLength # len(activeCellsTraceBefore)
similarityMatrixBeforeAfter = numpy.zeros((nSteps, nSteps))
similarityMatrixBefore = numpy.zeros((nSteps, nSteps))
similarityMatrixAfter = numpy.zeros((nSteps, nSteps))
for i in xrange(nSteps):
for j in xrange(nSteps):
similarityMatrixBefore[i,j] = SDRsimilarity(activeCellsTraceBefore[i], activeCellsTraceBefore[j])
similarityMatrixAfter[i,j] = SDRsimilarity(activeCellsTraceAfter[i], activeCellsTraceAfter[j])
similarityMatrixBeforeAfter[i,j] = SDRsimilarity(activeCellsTraceBefore[i], activeCellsTraceAfter[j])
return (similarityMatrixBefore, similarityMatrixAfter, similarityMatrixBeforeAfter)
def plotTPRvsUPROverlap(similarityMatrix):
f = plt.figure()
plt.subplot(2,2,1)
im = plt.imshow(similarityMatrix[0:sequenceLength, 0:sequenceLength],
interpolation="nearest",aspect='auto', vmin=0, vmax=0.3)
plt.colorbar(im)
plt.xlabel('UPR over time')
plt.ylabel('TPR over time')
plt.title(' Overlap between UPR & TPR')
f.savefig(outputDir+'OverlapTPRvsUPR.eps',format='eps')
def bitLifeVsLearningCycles(activeCellsTrace, numColumns,learningPasses, sequenceLength):
bitLifeVsLearning = numpy.zeros(learningPasses)
for i in xrange(learningPasses):
bitLifeList = []
bitLifeCounter = numpy.zeros(numColumns)
for t in xrange(sequenceLength):
preActiveCells = set(numpy.where(bitLifeCounter>0)[0])
currentActiveCells = activeCellsTrace[i*sequenceLength+t]
newActiveCells = list(currentActiveCells - preActiveCells)
stopActiveCells = list(preActiveCells - currentActiveCells)
if t == sequenceLength-1:
stopActiveCells = list(currentActiveCells)
continuousActiveCells = list(preActiveCells & currentActiveCells)
bitLifeList += list(bitLifeCounter[stopActiveCells])
bitLifeCounter[stopActiveCells] = 0
bitLifeCounter[newActiveCells] = 1
bitLifeCounter[continuousActiveCells] += 1
bitLifeVsLearning[i] = numpy.mean(bitLifeList)
return bitLifeVsLearning
def showSequenceStartLine(ax, trainingPasses, sequenceLength):
for i in xrange(trainingPasses):
ax.vlines(i*sequenceLength, 0, ax.get_ylim()[0], linestyles='--')
def runTestPhase(experiment, tmLearn=False, upLearn=True, outputfileName='results/TemporalPoolingOutputs.pdf'):
print "\nRunning test phase..."
print "tmLearn: ", tmLearn
print "upLearn: ", upLearn
inputSequences = generatedSequences
inputCategories = labeledSequences
experiment.tm.mmClearHistory()
experiment.up.mmClearHistory()
experiment.tm.reset()
experiment.up.reset()
# Persistence levels across time
poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 0))
# union SDR across time
activeCellsTrace = numpy.zeros((experiment.up._numColumns, 0))
# active cells in SP across time
activeSPTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of connections for SP cells
connectionCountTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of active inputs per SP cells
activeOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of predicted active inputs per SP cells
predictedActiveOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
for _ in xrange(trainingPasses):
experiment.tm.reset()
experiment.up.reset()
for i in xrange(len(inputSequences)):
sensorPattern = inputSequences[i]
inputCategory = inputCategories[i]
if sensorPattern is None:
pass
else:
experiment.tm.compute(sensorPattern,
learn=tmLearn,
sequenceLabel=inputCategory)
activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput()
overlapsActive = experiment.up._calculateOverlap(activeCells)
overlapsPredictedActive = experiment.up._calculateOverlap(predActiveCells)
activeOverlapsTrace = numpy.concatenate((activeOverlapsTrace, overlapsActive.reshape((experiment.up._numColumns,1))), 1)
predictedActiveOverlapsTrace = numpy.concatenate((predictedActiveOverlapsTrace, overlapsPredictedActive.reshape((experiment.up._numColumns,1))), 1)
experiment.up.compute(activeCells,
predActiveCells,
learn=upLearn,
sequenceLabel=inputCategory)
currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)
currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
currentUnionSDR[experiment.up._unionSDR] = 1
activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)
currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
currentSPSDR[experiment.up._activeCells] = 1
activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)
connectionCountTrace = numpy.concatenate((connectionCountTrace,
experiment.up._connectedCounts.reshape((experiment.up._numColumns, 1))), 1)
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(_, stats[0], stats[1], stats[2])
# print
# print MonitorMixinBase.mmPrettyPrintMetrics(\
# experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
# print
experiment.tm.mmClearHistory()
newConnectionCountTrace = numpy.zeros(connectionCountTrace.shape)
n = newConnectionCountTrace.shape[1]
newConnectionCountTrace[:,0:n-2] = connectionCountTrace[:,1:n-1] - connectionCountTrace[:,0:n-2]
# estimate fraction of shared bits across adjacent time point
unionSDRshared = experiment.up._mmComputeUnionSDRdiff()
bitLifeList = experiment.up._mmComputeBitLifeStats()
bitLife = numpy.array(bitLifeList)
# Plot SP outputs, UP persistence and UP outputs in testing phase
ncolShow = 100
f, (ax1, ax2, ax3) = plt.subplots(nrows=1,ncols=3)
ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto')
showSequenceStartLine(ax1, trainingPasses, sequenceLength)
ax1.set_title('SP SDR')
ax1.set_ylabel('Columns')
ax2.imshow(poolingActivationTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax2, trainingPasses, sequenceLength)
ax2.set_title('Persistence')
ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax3, trainingPasses, sequenceLength)
ax3.set_title('Union SDR')
# ax4.imshow(newConnectionCountTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
# showSequenceStartLine(ax4, trainingPasses, sequenceLength)
# ax4.set_title('New Connection #')
# ax2.set_xlabel('Time (steps)')
pp = PdfPages(outputfileName)
pp.savefig()
pp.close()
def runTPLearnPhase(experiment, learningPasses):
tmLearn = False
upLearn = True
inputSequences = generatedSequences
inputCategories = labeledSequences
experiment.tm.mmClearHistory()
experiment.up.mmClearHistory()
experiment.tm.reset()
experiment.up.reset()
# Persistence levels across time
poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 0))
# union SDR across time
activeCellsTrace = numpy.zeros((experiment.up._numColumns, 0))
# active cells in SP across time
activeSPTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of connections for SP cells
connectionCountTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of active inputs per SP cells
activeOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of predicted active inputs per SP cells
predictedActiveOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
permamenceTrace = numpy.zeros((experiment.tm.numberOfCells(), 0))
for _ in xrange(learningPasses):
experiment.tm.reset()
experiment.up.reset()
for i in xrange(len(inputSequences)):
sensorPattern = inputSequences[i]
inputCategory = inputCategories[i]
if sensorPattern is None:
pass
else:
experiment.tm.compute(sensorPattern,
learn=tmLearn,
sequenceLabel=inputCategory)
activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput()
overlapsActive = experiment.up._calculateOverlap(activeCells)
overlapsPredictedActive = experiment.up._calculateOverlap(predActiveCells)
activeOverlapsTrace = numpy.concatenate((activeOverlapsTrace, overlapsActive.reshape((experiment.up._numColumns,1))), 1)
predictedActiveOverlapsTrace = numpy.concatenate((predictedActiveOverlapsTrace, overlapsPredictedActive.reshape((experiment.up._numColumns,1))), 1)
# print ' step: ', i
# print 'current predicted input: ',numpy.where(predActiveCells)[0]
# print 'previous predicted input: ',numpy.where(experiment.up._prePredictedActiveInput)[0]
# print 'overlapsPredictedActive: ', overlapsPredictedActive[31]
experiment.up.compute(activeCells,
predActiveCells,
learn=upLearn,
sequenceLabel=inputCategory)
# print 'active UP cells: ', experiment.up._activeCells
# permamenceTrace = numpy.concatenate((permamenceTrace,
# experiment.up._permanences.getRow(31).reshape((experiment.tm.numberOfCells(),1))),1)
#
# currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
# poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)
#
# currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
# currentUnionSDR[experiment.up._unionSDR] = 1
# activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)
#
# currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
# currentSPSDR[experiment.up._activeCells] = 1
# activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)
#
# connectionCountTrace = numpy.concatenate((connectionCountTrace,
# experiment.up._connectedCounts.reshape((experiment.up._numColumns, 1))), 1)
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(_, stats[0], stats[1], stats[2])
# print
# print MonitorMixinBase.mmPrettyPrintMetrics(\
# experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
# print
experiment.tm.mmClearHistory()
# Plot SP outputs, UP persistence and UP outputs in testing phase
ncolShow = 50
f, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,ncols=4)
ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest", aspect='auto')
showSequenceStartLine(ax1, trainingPasses, sequenceLength)
ax1.set_title('SP SDR')
ax1.set_ylabel('Columns')
ax2.imshow(poolingActivationTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest", aspect='auto')
showSequenceStartLine(ax2, trainingPasses, sequenceLength)
ax2.set_title('Persistence')
ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest", aspect='auto')
showSequenceStartLine(ax3, trainingPasses, sequenceLength)
ax3.set_title('Union SDR')
ax4.imshow(predictedActiveOverlapsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax4, trainingPasses, sequenceLength)
ax4.set_title('New Connection #')
ax2.set_xlabel('Time (steps)')
def plotSummaryResults(upBeforeLearning, upDuringLearning, upAfterLearning, learningPasses):
maxSeparation = 30
skipBeginningElements = 10
activeCellsTraceBefore = upBeforeLearning._mmTraces['activeCells'].data
similarityVsSeparationBefore = getUnionSDRSimilarityCurve(activeCellsTraceBefore, trainingPasses, sequenceLength,
maxSeparation, skipBeginningElements)
activeCellsTraceAfter = upAfterLearning._mmTraces['activeCells'].data
similarityVsSeparationAfter = getUnionSDRSimilarityCurve(activeCellsTraceAfter, trainingPasses, sequenceLength,
maxSeparation, skipBeginningElements)
plotSDRsimilarityVsTemporalSeparation(similarityVsSeparationBefore, similarityVsSeparationAfter)
(similarityMatrixBefore, similarityMatrixAfter, similarityMatrixBeforeAfter) = \
calculateSimilarityMatrix(activeCellsTraceBefore, activeCellsTraceAfter)
plotTPRvsUPROverlap(similarityMatrixBeforeAfter)
plotSimilarityMatrix(similarityMatrixBefore, similarityMatrixAfter)
activeCellsTrace = upDuringLearning._mmTraces["activeCells"].data
meanBitLifeVsLearning = bitLifeVsLearningCycles(activeCellsTrace, experiment.up._numColumns, learningPasses, sequenceLength)
numBitsUsed = []
avgBitLatency = []
for rpt in xrange(learningPasses):
allActiveBits = set()
for i in xrange(sequenceLength):
allActiveBits |= (set(activeCellsTrace[rpt*sequenceLength+i]))
bitActiveTime = numpy.ones(experiment.up._numColumns) * -1
for i in xrange(sequenceLength):
curActiveCells = list(activeCellsTrace[rpt*sequenceLength+i])
for j in xrange(len(curActiveCells)):
if bitActiveTime[curActiveCells[j]] < 0:
bitActiveTime[curActiveCells[j]] = i
bitActiveTimeSummary = bitActiveTime[bitActiveTime>0]
print 'pass ', rpt, ' num bits: ', len(allActiveBits), ' latency : ',numpy.mean(bitActiveTimeSummary)
numBitsUsed.append(len(allActiveBits))
avgBitLatency.append(numpy.mean(bitActiveTimeSummary))
f = plt.figure()
plt.subplot(2,2,1)
plt.plot(numBitsUsed)
plt.xlabel(' learning pass #')
plt.ylabel(' number of cells in UPR')
plt.ylim([100,600])
plt.subplot(2,2,2)
plt.plot(avgBitLatency)
plt.xlabel(' learning pass #')
plt.ylabel(' average latency ')
plt.ylim([11,19])
plt.subplot(2,2,3)
plt.plot(meanBitLifeVsLearning)
plt.xlabel(' learning pass #')
plt.ylabel(' average lifespan ')
plt.ylim([10,30])
f.savefig(outputDir+'SDRpropertyOverLearning.eps',format='eps')
if __name__ == "__main__":
experiment = initializeNetwork()
runTMtrainingPhase(experiment)
runTestPhase(experiment, tmLearn=False, upLearn=False, outputfileName=outputDir+'TemporalPoolingBeforeLearning.pdf')
upBeforeLearning = copy.deepcopy(experiment.up)
runTPLearnPhase(experiment, learningPasses)
upDuringLearning = copy.deepcopy(experiment.up)
runTestPhase(experiment, tmLearn=False, upLearn=False, outputfileName=outputDir+'TemporalPoolingAfterLearning.pdf')
upAfterLearning = copy.deepcopy(experiment.up)
plotSummaryResults(upBeforeLearning, upDuringLearning, upAfterLearning, learningPasses)
|
agpl-3.0
|
abhishekkrthakur/scikit-learn
|
examples/linear_model/plot_sparse_recovery.py
|
243
|
7461
|
"""
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of the L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to stepwise
regression, and thus brings no advantage over a univariate
F-test.
Next, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
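# Added note: this is the matrix infinity-norm of
#   X_irrelevant^T X_relevant (X_relevant^T X_relevant)^{-1},
# i.e. the largest l1-norm over the rows of the regression of the irrelevant
# columns onto the relevant ones; values below 1 are the classical sufficient
# condition for exact support recovery by the Lasso.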
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
    # power 1/3 scales the path less brutally than the log, and makes it
    # easier to see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
    # Silence the user warnings output; it is not needed for this example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
|
bsd-3-clause
|
RobertABT/heightmap
|
build/matplotlib/setup.py
|
1
|
8851
|
"""
The matplotlib build options can be modified with a setup.cfg file. See
setup.cfg.template for more information.
"""
from __future__ import print_function, absolute_import
# This needs to be the very first thing to use distribute
from distribute_setup import use_setuptools
use_setuptools()
import sys
# distutils is breaking our sdists for files in symlinked dirs.
# distutils will copy if os.link is not available, so this is a hack
# to force copying
import os
try:
del os.link
except AttributeError:
pass
# This 'if' statement is needed to prevent spawning infinite processes
# on Windows
if __name__ == '__main__':
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
try:
from setuptools import setup
except ImportError:
try:
from setuptools.core import setup
except ImportError:
from distutils.core import setup
# The setuptools version of sdist adds a setup.cfg file to the tree.
# We don't want that, so we simply remove it, and it will fall back to
# vanilla distutils.
try:
from setuptools.command import sdist
except ImportError:
pass
else:
del sdist.sdist.make_release_tree
from distutils.dist import Distribution
import setupext
from setupext import print_line, print_raw, print_message, print_status
# Get the version from the source code
__version__ = setupext.Matplotlib().check()
# These are the packages in the order we want to display them. This
# list may contain strings to create section headers for the display.
mpl_packages = [
'Building Matplotlib',
setupext.Matplotlib(),
setupext.Python(),
setupext.Platform(),
'Required dependencies and extensions',
setupext.Numpy(),
setupext.Dateutil(),
setupext.Tornado(),
setupext.Pyparsing(),
setupext.CXX(),
setupext.LibAgg(),
setupext.FreeType(),
setupext.FT2Font(),
setupext.Png(),
setupext.Image(),
setupext.TTConv(),
setupext.Path(),
setupext.Contour(),
setupext.Delaunay(),
setupext.Tri(),
'Optional subpackages',
setupext.SampleData(),
setupext.Toolkits(),
setupext.Tests(),
'Optional backend extensions',
# These backends are listed in order of preference, the first
# being the most preferred. The first one that looks like it will
# work will be selected as the default backend.
setupext.BackendMacOSX(),
setupext.BackendQt4(),
setupext.BackendGtk3Agg(),
setupext.BackendGtk3Cairo(),
setupext.BackendGtkAgg(),
setupext.BackendTkAgg(),
setupext.BackendWxAgg(),
setupext.BackendGtk(),
setupext.BackendAgg(),
setupext.BackendCairo(),
setupext.Windowing(),
'Optional LaTeX dependencies',
setupext.DviPng(),
setupext.Ghostscript(),
setupext.LaTeX(),
setupext.PdfToPs()
]
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Python Software Foundation License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Visualization',
]
# One doesn't normally see `if __name__ == '__main__'` blocks in a setup.py;
# however, it is needed on Windows to avoid creating infinite subprocesses
# when using multiprocessing.
if __name__ == '__main__':
# These are distutils.setup parameters that the various packages add
# things to.
packages = []
namespace_packages = []
py_modules = []
ext_modules = []
package_data = {}
package_dir = {'': 'lib'}
install_requires = []
setup_requires = []
default_backend = None
# Go through all of the packages and figure out which ones we are
# going to build/install.
print_line()
print_raw("Edit setup.cfg to change the build options")
required_failed = []
good_packages = []
for package in mpl_packages:
if isinstance(package, str):
print_raw('')
print_raw(package.upper())
else:
try:
result = package.check()
if result is not None:
message = 'yes [%s]' % result
print_status(package.name, message)
except setupext.CheckFailed as e:
msg = str(e).strip()
if len(msg):
print_status(package.name, 'no [%s]' % msg)
else:
print_status(package.name, 'no')
if not package.optional:
required_failed.append(package)
else:
good_packages.append(package)
if isinstance(package, setupext.OptionalBackendPackage):
if default_backend is None:
default_backend = package.name
print_raw('')
# Abort if any of the required packages can not be built.
if required_failed:
print_line()
print_message(
"The following required packages can not "
"be built: %s" %
', '.join(x.name for x in required_failed))
sys.exit(1)
# Now collect all of the information we need to build all of the
# packages.
for package in good_packages:
if isinstance(package, str):
continue
packages.extend(package.get_packages())
namespace_packages.extend(package.get_namespace_packages())
py_modules.extend(package.get_py_modules())
ext = package.get_extension()
if ext is not None:
ext_modules.append(ext)
data = package.get_package_data()
for key, val in data.items():
package_data.setdefault(key, [])
package_data[key] = list(set(val + package_data[key]))
install_requires.extend(package.get_install_requires())
setup_requires.extend(package.get_setup_requires())
# Write the default matplotlibrc file
if default_backend is None:
default_backend = 'svg'
if setupext.options['backend']:
default_backend = setupext.options['backend']
with open('matplotlibrc.template') as fd:
template = fd.read()
with open('lib/matplotlib/mpl-data/matplotlibrc', 'w') as fd:
fd.write(template % {'backend': default_backend})
# Build in verbose mode if requested
if setupext.options['verbose']:
for mod in ext_modules:
mod.extra_compile_args.append('-DVERBOSE')
extra_args = {}
if sys.version_info[0] >= 3:
# Automatically 2to3 source on Python 3.x. This isn't set on
# Python 2 because it's not needed, and some really old
# versions of distribute don't support it.
extra_args['use_2to3'] = True
# Finalize the extension modules so they can get the Numpy include
# dirs
for mod in ext_modules:
mod.finalize()
# Avoid installing setup_requires dependencies if the user just
# queries for information
if (any('--' + opt in sys.argv for opt in
Distribution.display_option_names + ['help']) or
'clean' in sys.argv):
setup_requires = []
# Finally, pass this all along to distutils to do the heavy lifting.
distrib = setup(name="matplotlib",
version=__version__,
description="Python plotting package",
author="John D. Hunter, Michael Droettboom",
author_email="[email protected]",
url="http://matplotlib.org",
long_description="""
matplotlib strives to produce publication quality 2D graphics
for interactive graphing, scientific publishing, user interface
development and web application servers targeting multiple user
interfaces and hardcopy output formats. There is a 'pylab' mode
which emulates matlab graphics.
""",
license="BSD",
packages=packages,
namespace_packages = namespace_packages,
platforms='any',
py_modules=py_modules,
ext_modules=ext_modules,
package_dir=package_dir,
package_data=package_data,
classifiers=classifiers,
download_url="https://downloads.sourceforge.net/project/matplotlib/matplotlib/matplotlib-{0}/matplotlib-{0}.tar.gz".format(__version__),
# List third-party Python packages that we require
install_requires=install_requires,
setup_requires=setup_requires,
# matplotlib has C/C++ extensions, so it's not zip safe.
# Telling setuptools this prevents it from doing an automatic
# check for zip safety.
zip_safe=False,
**extra_args
)
|
mit
|
plowman/python-mcparseface
|
models/transformer/example.py
|
7
|
2114
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from scipy import ndimage
import tensorflow as tf
from spatial_transformer import transformer
import numpy as np
import matplotlib.pyplot as plt
# %% Create a batch of three images (1600 x 1200)
# %% Image retrieved from:
# %% https://raw.githubusercontent.com/skaae/transformer_network/master/cat.jpg
im = ndimage.imread('cat.jpg')
im = im / 255.
im = im.reshape(1, 1200, 1600, 3)
im = im.astype('float32')
# %% Let the output size of the transformer be half the image size.
out_size = (600, 800)
# %% Simulate batch
batch = np.append(im, im, axis=0)
batch = np.append(batch, im, axis=0)
num_batch = 3
x = tf.placeholder(tf.float32, [None, 1200, 1600, 3])
x = tf.cast(batch, 'float32')
# %% Create localisation network and convolutional layer
with tf.variable_scope('spatial_transformer_0'):
# %% Create a fully-connected layer with 6 output nodes
n_fc = 6
W_fc1 = tf.Variable(tf.zeros([1200 * 1600 * 3, n_fc]), name='W_fc1')
# %% Zoom into the image
initial = np.array([[0.5, 0, 0], [0, 0.5, 0]])
initial = initial.astype('float32')
initial = initial.flatten()
b_fc1 = tf.Variable(initial_value=initial, name='b_fc1')
h_fc1 = tf.matmul(tf.zeros([num_batch, 1200 * 1600 * 3]), W_fc1) + b_fc1
h_trans = transformer(x, h_fc1, out_size)
# %% Run session
sess = tf.Session()
sess.run(tf.initialize_all_variables())
y = sess.run(h_trans, feed_dict={x: batch})
# plt.imshow(y[0])
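# Added sketch (not part of the original example): the affine parameters above
# sample the central half of each input image, so `y` should hold three
# 600x800 crops of the cat at roughly 2x zoom.
print(y.shape)  # expected: (3, 600, 800, 3) = (batch, out_height, out_width, channels)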
|
apache-2.0
|
bigdataelephants/scikit-learn
|
sklearn/neighbors/tests/test_nearest_centroid.py
|
21
|
4207
|
"""
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
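# With these points the class centroids are (-4/3, -4/3) and (4/3, 4/3), so the
# first test point in T is nearest the -1 centroid and the other two are
# nearest the +1 centroid.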
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset, including sparse versions."""
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
"""Check consistency on dataset iris."""
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
"""Check consistency on dataset iris, when using shrinkage."""
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
"""Test that NearestCentroid gives same results on translated data"""
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
"""Test the manhattan metric."""
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
if __name__ == "__main__":
import nose
nose.runmodule()
|
bsd-3-clause
|
akidhruv/Computational_Cauldron
|
FORTRAN/INS_PyF/NEW_Solver.py
|
1
|
11325
|
# Importing Libraries
import INS
import POISSON
import HEAT
import numpy as np
from math import *
from mpi4py import MPI
import time
#import matplotlib.pyplot as plt
#__________________Defining MPI communication function______________________#
def MPI_applyBC(u,x_id,y_id,x_procs,y_procs,x_comm,y_comm):
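    # Added note: exchanges the one-cell-wide guard (ghost) layers between
    # neighbouring blocks. Interior edge slices (u[1,:], u[-2,:], u[:,1],
    # u[:,-2]) are sent to the adjacent rank, and the received values overwrite
    # the guard slices (u[0,:], u[-1,:], u[:,0], u[:,-1]); blocks at either end
    # of a direction only exchange on their interior side.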
if(x_procs > 1):
if(x_id%2 == 0):
if(x_id == 0):
x_comm.send(u[-2,:],dest=(x_id+1)%x_procs,tag=1)
u[-1,:] = x_comm.recv(source=(x_id+1)%x_procs,tag=2)
elif(x_id == nblockx - 1):
x_comm.send(u[1,:],dest=(x_id-1+x_procs)%x_procs,tag=3)
u[0,:] = x_comm.recv(source=(x_id-1+x_procs)%x_procs,tag=4)
else:
x_comm.send(u[-2,:],dest=(x_id+1)%x_procs,tag=1)
u[-1,:] = x_comm.recv(source=(x_id+1)%x_procs,tag=2)
x_comm.send(u[1,:],dest=(x_id-1+x_procs)%x_procs,tag=3)
u[0,:] = x_comm.recv(source=(x_id-1+x_procs)%x_procs,tag=4)
elif(x_id%2 == 1):
if(x_id == nblockx - 1):
x_comm.send(u[1,:],dest=(x_id-1+x_procs)%x_procs,tag=2)
u[0,:] = x_comm.recv(source=(x_id-1+x_procs)%x_procs,tag=1)
else:
x_comm.send(u[1,:],dest=(x_id-1+x_procs)%x_procs,tag=2)
u[0,:] = x_comm.recv(source=(x_id-1+x_procs)%x_procs,tag=1)
x_comm.send(u[-2,:],dest=(x_id+1)%x_procs,tag=4)
u[-1,:] = x_comm.recv(source=(x_id+1)%x_procs,tag=3)
if(y_procs > 1):
if(y_id%2 == 0):
if(y_id == 0):
y_comm.send(u[:,-2],dest=(y_id+1)%y_procs,tag=5)
u[:,-1] = y_comm.recv(source=(y_id+1)%y_procs,tag=6)
elif(y_id == nblocky - 1):
y_comm.send(u[:,1],dest=(y_id-1+y_procs)%y_procs,tag=7)
u[:,0] = y_comm.recv(source=(y_id-1+y_procs)%y_procs,tag=8)
else:
y_comm.send(u[:,-2],dest=(y_id+1)%y_procs,tag=5)
u[:,-1] = y_comm.recv(source=(y_id+1)%y_procs,tag=6)
y_comm.send(u[:,1],dest=(y_id-1+y_procs)%y_procs,tag=7)
u[:,0] = y_comm.recv(source=(y_id-1+y_procs)%y_procs,tag=8)
elif(y_id%2 == 1):
if(y_id == nblocky - 1):
y_comm.send(u[:,1],dest=(y_id-1+y_procs)%y_procs,tag=6)
u[:,0] = y_comm.recv(source=(y_id-1+y_procs)%y_procs,tag=5)
else:
y_comm.send(u[:,1],dest=(y_id-1+y_procs)%y_procs,tag=6)
u[:,0] = y_comm.recv(source=(y_id-1+y_procs)%y_procs,tag=5)
y_comm.send(u[:,-2],dest=(y_id+1)%y_procs,tag=8)
u[:,-1] = y_comm.recv(source=(y_id+1)%y_procs, tag=7)
return u
##_______________________________________MAIN_______________________________________________#
#_____________________________Initializing MPI environment__________________________________#
nblockx = 2
nblocky = 1
comm = MPI.COMM_WORLD
myid = comm.Get_rank()
procs = comm.Get_size()
x_comm = comm.Split(myid/nblockx,myid%nblockx)
y_comm = comm.Split(myid%nblockx,myid/nblockx)
x_id = x_comm.Get_rank()
x_procs = x_comm.Get_size()
y_id = y_comm.Get_rank()
y_procs = y_comm.Get_size()
x_fcomm = x_comm.py2f()
y_fcomm = y_comm.py2f()
fcomm = comm.py2f()
t1 = MPI.Wtime()
#______________________________Domain Length and Limits_____________________________________#
Dx_min = -0.5
Dx_max = 0.5
Dy_min = -0.5
Dy_max = 0.5
Lx = Dx_max - Dx_min
Ly = Dy_max - Dy_min
gr_Lx = Lx/nblockx
gr_Ly = Ly/nblocky
#______________________________________Block size__________________________________________#
Nxb = 60
Nyb = 120
dx = gr_Lx/Nxb
dy = gr_Ly/Nyb
#_______________________________________Constants__________________________________________#
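# Added note: the names below are integer indices into the first axis of the
# 'center', 'facex'/'facey' and 'work' arrays allocated further down, e.g.
# center[PRES_VAR] is the pressure field, facex[VELC_VAR] the x-face velocity,
# and work[PRHS_VAR] the right-hand side of the pressure Poisson equation.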
PRES_VAR = 0
TEMP_VAR = 1
PNEW_VAR = 2
TNEW_VAR = 3
CENT_VAR = 4
VELC_VAR = 0
VSTR_VAR = 1
VOLD_VAR = 2
FACE_VAR = 3
GONE_VAR = 0
GTWO_VAR = 1
G1NW_VAR = 2
G2NW_VAR = 3
PRHS_VAR = 4
WORK_VAR = 5
#___________________________________physical variables___________________________________#
x = Dx_min + (myid%nblockx)*gr_Lx + dx*np.linspace(0,Nxb,Nxb+1)
y = Dy_min + (myid/nblockx)*gr_Ly + dy*np.linspace(0,Nyb,Nyb+1)
[X,Y] = np.meshgrid(x,y)
center = np.zeros((CENT_VAR,Nxb+2,Nyb+2),dtype=float)
facex = np.zeros((FACE_VAR,Nxb+2,Nyb+2),dtype=float)
facey = np.zeros((FACE_VAR,Nxb+2,Nyb+2),dtype=float)
work = np.zeros((WORK_VAR,Nxb,Nyb),dtype=float)
center[TEMP_VAR,:,:] = 313.0
#___________________________________ins parameters______________________________________#
ins_inRe = 0.001
ins_sig = 0.01
ins_cfl = 0.15
#__________________________________heat parameters______________________________________#
ht_Pr = 0.7
#_________________________________driver parameters_____________________________________#
dt_sig = ins_sig*(min(dx,dy)**2)/ins_inRe
dt_cfl = ins_cfl*min(dx,dy)
dt_temp = dt_sig*ht_Pr
dt = min(dt_sig,dt_cfl)
dt = min(dt,dt_temp)
t = 60.0
nt = int(t/dt)
Maxit = 1500
p_res = 0.
u_res = 0.
v_res = 0.
T_res = 0.
maxdiv = 0.
mindiv = 0.
ins_p_res = 0.
ins_u_res = 0.
ins_v_res = 0.
ins_T_res = 0.
ins_maxdiv = 0.
ins_mindiv = 0.
#________________________________Physics Sequence____________________________________#
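# Added note: each time step follows a fractional-step (projection) scheme:
#   1. predictor     - advance the face velocities without the pressure gradient
#   2. Poisson solve - pressure from the divergence of the starred velocity
#   3. corrector     - project the velocity onto an (approximately) divergence-free field
#   4. heat solve    - advect/diffuse the temperature with the updated velocity
# with an MPI guard-cell exchange and physical boundary conditions applied
# after the predictor and corrector stages.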
tstep = 0
while(tstep<=nt):
facex[VOLD_VAR,:,:] = facex[VELC_VAR,:,:]
facey[VOLD_VAR,:,:] = facey[VELC_VAR,:,:]
#_____________________________Predictor_____________________________#
#G1_new,G2_new,ut,vt = INS.predictor(u,v,G1,G2,ins_inRe,dx,dy,dt,tstep,Nxb,Nyb)
work[G1NW_VAR,:,:],work[G2NW_VAR,:,:],facex[VSTR_VAR,:,:],facey[VSTR_VAR,:,:] = INS.predictor(facex[VELC_VAR,:,:],facey[VELC_VAR,:,:],work[GONE_VAR,:,:],work[GTWO_VAR,:,:],ins_inRe,dx,dy,dt,tstep,Nxb,Nyb)
work[GONE_VAR,:,:] = work[G1NW_VAR,:,:]
work[GTWO_VAR,:,:] = work[G2NW_VAR,:,:]
#__________________Predictor Boundary Conditions_____________________#
facex[VSTR_VAR,:,:] = MPI_applyBC(facex[VSTR_VAR,:,:],x_id,y_id,x_procs,y_procs,x_comm,y_comm)
facey[VSTR_VAR,:,:] = MPI_applyBC(facey[VSTR_VAR,:,:],x_id,y_id,x_procs,y_procs,x_comm,y_comm)
# LOW X
if(x_id == 0):
facex[VSTR_VAR,0,:] = 0.0
facey[VSTR_VAR,0,:] = -facey[VSTR_VAR,1,:]
# HIGH X
if(x_id == nblockx-1):
facex[VSTR_VAR,-2,:] = 0.0
facex[VSTR_VAR,-1,:] = 0.0
facey[VSTR_VAR,-1,:] = -facey[VSTR_VAR,-2,:]
# LOW Y
if(y_id == 0):
facey[VSTR_VAR,:,0] = 0.0
facex[VSTR_VAR,:,0] = -facex[VSTR_VAR,:,1]
# HIGH Y
if(y_id == nblocky-1):
facey[VSTR_VAR,:,-1] = 0.0
facey[VSTR_VAR,:,-2] = 0.0
facex[VSTR_VAR,:,-1] = 2.0 -facex[VSTR_VAR,:,-2]
#_____________________________Poisson Solver________________________#
work[PRHS_VAR,:,:] = -((1/(dy*dt))*(facey[VSTR_VAR,1:-1,1:-1]-facey[VSTR_VAR,1:-1,:-2]))-((1/(dx*dt))*(facex[VSTR_VAR,1:-1,1:-1]-facex[VSTR_VAR,:-2,1:-1]))
center[PNEW_VAR,:,:],p_counter,ins_p_res = POISSON.solver(center[PRES_VAR,:,:],work[PRHS_VAR,:,:],Maxit,\
x_fcomm,y_fcomm,fcomm,x_id,y_id,myid,x_procs,y_procs,\
nblockx,nblocky,dx,dy,Nxb,Nyb)
#________________________________Corrector____________________________#
facex[VELC_VAR,:,:],facey[VELC_VAR,:,:] = INS.corrector(facex[VSTR_VAR,:,:],facey[VSTR_VAR,:,:],center[PRES_VAR,:,:],dt,dx,dy,Nxb,Nyb)
#__________________Corrector Boundary Conditions_____________________#
facex[VELC_VAR,:,:] = MPI_applyBC(facex[VELC_VAR,:,:],x_id,y_id,x_procs,y_procs,x_comm,y_comm)
facey[VELC_VAR,:,:] = MPI_applyBC(facey[VELC_VAR,:,:],x_id,y_id,x_procs,y_procs,x_comm,y_comm)
# LOW X
if(x_id == 0):
facex[VELC_VAR,0,:] = 0.0
facey[VELC_VAR,0,:] = -facey[VELC_VAR,1,:]
# HIGH X
if(x_id == nblockx - 1):
facex[VELC_VAR,-2,:] = 0.0
facex[VELC_VAR,-1,:] = 0.0
facey[VELC_VAR,-1,:] = -facey[VELC_VAR,-2,:]
# LOW Y
if(y_id == 0):
facey[VELC_VAR,:,0] = 0.0
facex[VELC_VAR,:,0] = -facex[VELC_VAR,:,1]
# HIGH Y
if(y_id == nblocky - 1):
facey[VELC_VAR,:,-1] = 0.0
facey[VELC_VAR,:,-2] = 0.0
facex[VELC_VAR,:,-1] = 2.0 - facex[VELC_VAR,:,-2]
#___________________________Residuals_______________________________#
u_res = np.sum((facex[VOLD_VAR,:,:]-facex[VELC_VAR,:,:])**2)
v_res = np.sum((facey[VOLD_VAR,:,:]-facey[VELC_VAR,:,:])**2)
ins_u_res = comm.allreduce(u_res, op=MPI.SUM)
ins_u_res = sqrt(ins_u_res/((Nxb+2)*(Nyb+2)*procs))
ins_v_res = comm.allreduce(v_res, op=MPI.SUM)
ins_v_res = sqrt(ins_v_res/((Nxb+2)*(Nyb+2)*procs))
#____________________________Divergence_____________________________#
maxdiv = -10.0**(10)
mindiv = 10.0**(10)
maxdiv = max(maxdiv,np.max(((1/(dy))*(facey[VELC_VAR,1:-1,1:-1]-facey[VELC_VAR,1:-1,:-2])) + ((1/(dx))*(facex[VELC_VAR,1:-1,1:-1]-facex[VELC_VAR,:-2,1:-1]))))
mindiv = min(mindiv,np.min(((1/(dy))*(facey[VELC_VAR,1:-1,1:-1]-facey[VELC_VAR,1:-1,:-2])) + ((1/(dx))*(facex[VELC_VAR,1:-1,1:-1]-facex[VELC_VAR,:-2,1:-1]))))
ins_maxdiv = comm.allreduce(maxdiv, op=MPI.MAX)
ins_mindiv = comm.allreduce(mindiv, op=MPI.MIN)
#_______________________Heat Advection Diffusion____________________#
center[TNEW_VAR,:,:] = HEAT.tempsolver(center[TEMP_VAR,:,:],facex[VELC_VAR,:,:],facey[VELC_VAR,:,:],dx,dy,dt,ins_inRe,ht_Pr,Nxb,Nyb)
#____________________Temperature Boundary Conditions________________#
center[TNEW_VAR,:,:] = MPI_applyBC(center[TNEW_VAR,:,:],x_id,y_id,x_procs,y_procs,x_comm,y_comm)
# LOW X
if(x_id == 0):
center[TNEW_VAR,0,:] = center[TNEW_VAR,1,:]
# HIGH X
if(x_id == nblockx - 1):
center[TNEW_VAR,-1,:] = center[TNEW_VAR,-2,:]
# LOW Y
if(y_id == 0):
center[TNEW_VAR,:,0] = center[TNEW_VAR,:,1]
# HIGH Y
if(y_id == nblocky - 1):
center[TNEW_VAR,:,-1] = 2*383.15 - center[TNEW_VAR,:,-2]
#___________________________Residuals_______________________________#
T_res = np.sum((center[TNEW_VAR,:,:]-center[TEMP_VAR,:,:])**2)
ins_T_res = comm.allreduce(T_res, op=MPI.SUM)
ins_T_res = sqrt(ins_T_res/((Nxb+2)*(Nyb+2)*procs))
center[TEMP_VAR,:,:] = center[TNEW_VAR,:,:]
#____________________________Display_________________________________#
if(myid == 0 and tstep%5 == 0):
print "---------------------------PARAMETER DISPLAY-----------------------"
print "Simulation Time : ",tstep*dt," s"
print "U velocity Residual : ",ins_u_res
print "V velocity Residual : ",ins_v_res
print "Temperature Residual: ",ins_T_res
print "Pressure Residual : ",ins_p_res
print "Poisson Counter : ",p_counter
print "MAXDIV : ",ins_maxdiv," MINDIV: ",ins_mindiv
#__________________________Convergence Check_________________________#
tstep += 1
if(ins_u_res<10**-7 and ins_u_res != 0. and ins_v_res<10**-7 and ins_v_res != 0.):
break
#_________________________Post Processing and Writing Data to File_____________#
uu = 0.5*(facex[VELC_VAR,:-1,:-1] + facex[VELC_VAR,:-1,1:])
vv = 0.5*(facey[VELC_VAR,:-1,:-1] + facey[VELC_VAR,1:,:-1])
pp = 0.25*(center[PRES_VAR,:-1,:-1] + center[PRES_VAR,1:,:-1] + center[PRES_VAR,:-1,1:] + center[PRES_VAR,1:,1:])
tt = 0.25*(center[TEMP_VAR,:-1,:-1] + center[TEMP_VAR,1:,:-1] + center[TEMP_VAR,:-1,1:] + center[TEMP_VAR,1:,1:])
uu = uu.T
vv = vv.T
pp = pp.T
tt = tt.T
X = np.reshape(X,np.size(X))
Y = np.reshape(Y,np.size(Y))
uu = np.reshape(uu,np.size(uu))
vv = np.reshape(vv,np.size(vv))
pp = np.reshape(pp,np.size(pp))
tt = np.reshape(tt,np.size(tt))
DataOut = np.column_stack((X.T,Y.T,uu.T,vv.T,pp.T,tt.T))
np.savetxt('LidData0%d.dat' % myid,DataOut)
t2 = MPI.Wtime()
print t2-t1
|
mit
|
MohammedWasim/scikit-learn
|
examples/cluster/plot_agglomerative_clustering_metrics.py
|
402
|
4492
|
"""
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of each class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
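# sqr(t) is a +/-1 square wave with the same period as cos(t); each waveform
# below is a scaled, phase-shifted copy of it plus sparse additive noise.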
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
|
bsd-3-clause
|
Sentient07/scikit-learn
|
sklearn/gaussian_process/tests/test_kernels.py
|
51
|
12799
|
"""Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
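# kernel_white is kept as a named reference: WhiteKernel only adds noise on the
# diagonal, so k(X) != k(X, X), and the identity-based checks below
# (test_auto_vs_cross, test_kernel_versus_pairwise) skip it explicitly.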
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,
RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
# Compare analytic and numeric gradient of kernels.
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
# Check that parameter vector theta of kernel is set correctly.
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s[0:-len("_bounds")],
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i + 1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i + 1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
# Auto-correlation and cross-correlation should be consistent.
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
# Test that diag method of kernel returns consistent results.
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
# Adding kernels and multiplying kernels should be commutative.
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
# Anisotropic kernel should be consistent with isotropic kernels.
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
# Test stationarity of kernels.
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def check_hyperparameters_equal(kernel1, kernel2):
# Check that hyperparameters of two kernels are equal
for attr in set(dir(kernel1) + dir(kernel2)):
if attr.startswith("hyperparameter_"):
attr_value1 = getattr(kernel1, attr)
attr_value2 = getattr(kernel2, attr)
assert_equal(attr_value1, attr_value2)
def test_kernel_clone():
# Test that sklearn's clone works correctly on kernels.
for kernel in kernels:
kernel_cloned = clone(kernel)
# XXX: Should this be fixed?
# This differs from the sklearn's estimators equality check.
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
# Check that all constructor parameters are equal.
assert_equal(kernel.get_params(), kernel_cloned.get_params())
# Check that all hyperparameters are equal.
yield check_hyperparameters_equal, kernel, kernel_cloned
def test_kernel_clone_after_set_params():
# This test is to verify that using set_params does not
# break clone on kernels.
# This used to break because in kernels such as the RBF, non-trivial
# logic that modified the length scale used to be in the constructor
# See https://github.com/scikit-learn/scikit-learn/issues/6961
# for more details.
bounds = (1e-5, 1e5)
for kernel in kernels:
kernel_cloned = clone(kernel)
params = kernel.get_params()
# RationalQuadratic kernel is isotropic.
isotropic_kernels = (ExpSineSquared, RationalQuadratic)
if 'length_scale' in params and not isinstance(kernel,
isotropic_kernels):
length_scale = params['length_scale']
if np.iterable(length_scale):
params['length_scale'] = length_scale[0]
params['length_scale_bounds'] = bounds
else:
params['length_scale'] = [length_scale] * 2
params['length_scale_bounds'] = bounds * 2
kernel_cloned.set_params(**params)
kernel_cloned_clone = clone(kernel_cloned)
assert_equal(kernel_cloned_clone.get_params(),
kernel_cloned.get_params())
assert_not_equal(id(kernel_cloned_clone), id(kernel_cloned))
yield (check_hyperparameters_equal, kernel_cloned,
kernel_cloned_clone)
def test_matern_kernel():
# Test consistency of Matern kernel for special values of nu.
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
# Check that GP kernels can also be used as pairwise kernels.
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
# Check that set_params()/get_params() is consistent with kernel.theta.
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if isinstance("string", type(hyperparameter.bounds)):
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if isinstance("string", type(hyperparameter.bounds)):
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value] * size})
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
[value] * size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
def test_repr_kernels():
# Smoke-test for repr in kernels.
for kernel in kernels:
repr(kernel)
|
bsd-3-clause
|
MatthieuBizien/scikit-learn
|
examples/cluster/plot_adjusted_for_chance_measures.py
|
105
|
4300
|
"""
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters gets
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Hence, only adjusted measures can safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each
possible value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
|
bsd-3-clause
|
mhue/scikit-learn
|
examples/model_selection/plot_underfitting_overfitting.py
|
230
|
2649
|
"""
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly
from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
|
bsd-3-clause
|
glouppe/scikit-learn
|
benchmarks/bench_tree.py
|
297
|
3617
|
"""
To run this, you'll need to have the following installed:
* scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot the number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
plizonczyk/potential-octo-computing-machine
|
ahp.py
|
1
|
1823
|
from configparser import ConfigParser
import numpy as np
import pandas as pd
class AHP(object):
def __init__(self, filename='default.conf'):
self.cfg = ConfigParser()
self.cfg.read(filename)
self.names = pd.read_csv(self.cfg.get('filenames', 'alternatives'), header=None, dtype=np.str).as_matrix()[0]
criteria = pd.read_csv(self.cfg.get('filenames', 'criteria'), dtype=np.float)
self.criteria = criteria.as_matrix()
self.criteria_names = list(criteria.columns)
self.judgements = self._read_judgements()
self.criteria_eig, self.judgements_eig, self.scores = None, None, None
def _read_judgements(self):
judgements = {}
for name in self.cfg['judgements']:
judgements[name] = pd.read_csv(self.cfg.get('judgements', name), header=None, dtype=np.float).as_matrix()
return judgements
def _calculate_eigenvectors(self):
criteria_eig = self._get_eig(self.criteria)
judgements_eig = {name: self._get_eig(matrix) for name, matrix in self.judgements.items()}
return criteria_eig, judgements_eig
def _get_eig(self, matrix):
values, vectors = np.linalg.eig(matrix)
abs_values = np.absolute(values)
max_value_indice = np.argmax(abs_values)
return np.real((vectors[:, max_value_indice] / sum(vectors[:, max_value_indice])))
def _calculate_scores(self):
scores = np.zeros(3)
for i, name in enumerate(self.criteria_names):
scores += self.judgements_eig[name] * self.criteria_eig[i]
return {name: score for name, score in zip(self.names, scores)}
def run(self):
self.criteria_eig, self.judgements_eig = self._calculate_eigenvectors()
self.scores = self._calculate_scores()
return self.scores
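if __name__ == '__main__':
    # Minimal usage sketch (added here for illustration; not part of the
    # original module). It assumes a 'default.conf' whose [filenames] and
    # [judgements] sections point at CSV files holding the alternatives, the
    # criteria pairwise-comparison matrix and one judgement matrix per
    # criterion.
    ahp = AHP('default.conf')
    print(ahp.run())  # dict mapping alternative name -> priority score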
|
unlicense
|
Erotemic/hotspotter
|
_graveyard/setup.old.py
|
2
|
4577
|
import os
import sys
import shutil
import textwrap
from os.path import isdir, isfile, join, exists, realpath, normpath
from distutils.core import setup
from distutils.util import convert_path
from fnmatch import fnmatchcase
from _setup import git_helpers as git_helpers
def get_hotspotter_datafiles():
'Build the data files used by py2exe and py2app'
import matplotlib
data_files = []
# Include Matplotlib data (for figure images and things)
data_files.extend(matplotlib.get_py2exe_datafiles())
# Include TPL Libs
plat_tpllibdir = join('hotspotter', '_tpl', 'lib', sys.platform)
if sys.platform == 'win32':
# Hack to get MinGW dlls in for FLANN
data_files.append(('',[join(plat_tpllibdir, 'libgcc_s_dw2-1.dll'),
join(plat_tpllibdir,'libstdc++-6.dll')]))
if sys.platform == 'darwin':
pass
else:
for root,dlist,flist in os.walk(plat_tpllibdir):
tpl_dest = root
tpl_srcs = [realpath(join(root,fname)) for fname in flist]
data_files.append((tpl_dest, tpl_srcs))
# Include Splash Screen
splash_dest = normpath('_frontend')
splash_srcs = [realpath('_frontend/splash.png')]
data_files.append((splash_dest, splash_srcs))
return data_files
if cmd in ['localize', 'setup_localize.py']:
from setup_localize import *
#package_application() - moved to graveyard (pyapp_pyexe.py)
def get_system_setup_kwargs():
return dict(
platforms=PLATFORMS,
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
install_optional=INSTALL_OPTIONAL,
)
def get_info_setup_kwarg():
return dict(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=LICENSE,
keywords=' '.join([
            'hotspotter', 'vision', 'animals', 'object recognition',
'instance recognition', 'naive bayes' ]))
def ensure_findable_windows_dlls():
numpy_core = r'C:\Python27\Lib\site-packages\numpy\core'
numpy_libs = ['libiomp5md.dll', 'libifcoremd.dll', 'libiompstubs5md.dll', 'libmmd.dll']
pydll_dir = r'C:\Python27\DLLs'
for nplib in numpy_libs:
dest = join(pydll_dir, nplib)
if not exists(dest):
src = join(numpy_core, nplib)
shutil.copyfile(src, dest)
zmqpyd_target = r'C:\Python27\DLLs\libzmq.pyd'
if not exists(zmqpyd_target):
#HACK http://stackoverflow.com/questions/14870825/
#py2exe-error-libzmq-pyd-no-such-file-or-directory
pyzmg_source = r'C:\Python27\Lib\site-packages\zmq\libzmq.pyd'
shutil.copyfile(pyzmg_source, zmqpyd_target)
def write_text(filename, text, mode='w'):
    with open(filename, mode=mode) as a:
try:
a.write(text)
except Exception as e:
print(e)
def write_version_py(filename=None):
if filename is None:
hsdir = os.path.split(realpath(__file__))[0]
filename = join(hsdir, 'generated_version.py')
cnt = textwrap.dedent('''
# THIS FILE IS GENERATED FROM HOTSPOTTER SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
git_revision = '%(git_revision)s'
full_version = '%(version)s.dev-%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version''')
FULL_VERSION = VERSION
if isdir('.git'):
GIT_REVISION = git_helpers.git_version()
# must be a source distribution, use existing version file
elif exists(filename):
GIT_REVISION = 'RELEASE'
else:
GIT_REVISION = 'unknown-git'
FULL_VERSION += '.dev-' + GIT_REVISION
text = cnt % {'version': VERSION,
'full_version': FULL_VERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)}
write_text(filename, text)
def find_packages(where='.', exclude=()):
out = []
stack=[(convert_path(where), '')]
while stack:
where, prefix = stack.pop(0)
for name in os.listdir(where):
fn = join(where,name)
if ('.' not in name and isdir(fn) and
isfile(join(fn, '__init__.py'))
):
out.append(prefix+name)
stack.append((fn, prefix+name+'.'))
for pat in list(exclude) + ['ez_setup', 'distribute_setup']:
out = [item for item in out if not fnmatchcase(item, pat)]
return out
if cmd == 'setup_boost':
setup_boost()
sys.exit(0)
|
apache-2.0
|
jorik041/scikit-learn
|
examples/semi_supervised/plot_label_propagation_structure.py
|
247
|
2432
|
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
|
bsd-3-clause
|
bigfootproject/OSMEF
|
data_processing/graphs/vm2vm_distance.py
|
1
|
1825
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import json
data = json.load(open("../../osmef/data.json"))
N = 3
MS = 10 # markersize
xpoints = (0, 2, 4)
fig = plt.figure()
ax = fig.add_subplot(111)
lines = []
btc_data = []
btc_data.append(data["vm_to_vm_1"]["c=1"]["rx.rate_MBps"]["avg"])
btc_data.append(data["vm_to_vm_2"]["c=1"]["rx.rate_MBps"]["avg"])
btc_data.append(data["vm_to_vm_3"]["c=1"]["rx.rate_MBps"]["avg"])
#point = data["vm_to_vm_3"]["c=1"]["rx.rate_MBps"]["avg"]
print(btc_data)
lines.append(ax.semilogy(xpoints, btc_data, "o-", markersize=MS))
btc_data = []
btc_data.append(data["vm_to_vm_1"]["c=10"]["rx.rate_MBps"]["avg"])
btc_data.append(data["vm_to_vm_2"]["c=10"]["rx.rate_MBps"]["avg"])
btc_data.append(data["vm_to_vm_3"]["c=10"]["rx.rate_MBps"]["avg"])
lines.append(ax.semilogy(xpoints, btc_data, "*-", markersize=MS))
btc_data = []
btc_data.append(data["vm_to_vm_1"]["c=30"]["rx.rate_MBps"]["avg"])
btc_data.append(data["vm_to_vm_2"]["c=30"]["rx.rate_MBps"]["avg"])
btc_data.append(data["vm_to_vm_3"]["c=30"]["rx.rate_MBps"]["avg"])
lines.append(ax.semilogy(xpoints, btc_data, "^-", markersize=MS))
btc_data = []
btc_data.append(data["vm_to_vm_1"]["c=50"]["rx.rate_MBps"]["avg"])
btc_data.append(data["vm_to_vm_2"]["c=50"]["rx.rate_MBps"]["avg"])
btc_data.append(data["vm_to_vm_3"]["c=50"]["rx.rate_MBps"]["avg"])
lines.append(ax.semilogy(xpoints, btc_data, "s-", markersize=MS))
#ax.plot(4, point, "^")
legend = ["p = 1", "p = 10", "p = 30", "p = 50"]
# add some
ax.set_title('VM to VM BTC')
ax.set_xlabel('Distance')
ax.set_ylabel('BTC in MB/s')
ax.set_xticks([0, 2, 4])
#ax.set_xbound(-0.5, 4.5)
#ax.set_xticklabels(xpoints)
ax.grid(True)
ax.legend([x[0] for x in lines], legend, loc="upper right")
#fig.autofmt_xdate()
plt.savefig("vm2vm_distance.pdf")
|
apache-2.0
|
tacaswell/dataportal
|
dataportal/testing/decorators.py
|
2
|
4644
|
########################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for decorators related to testing.
Much of this code is inspired by the code in matplotlib. Exact copies
are noted.
"""
import nose
from nose.tools import make_decorator
# This code is copied from numpy
class KnownFailureTest(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
# copied from matplotlib
class KnownFailureDidNotFailTest(Exception):
'''Raise this exception to mark a test should have failed but did not.'''
pass
def known_fail_if(cond):
"""
Make sure a known failure fails.
This function is a decorator factory.
"""
# make the decorator function
def dec(in_func):
# make the wrapper function
# if the condition is True
if cond:
def inner_wrap():
                # try the test anyway
try:
in_func()
                # when in_func fails, raises KnownFailureTest
# which is registered with nose and it will be marked
# as K in the results
except Exception:
raise KnownFailureTest()
# if it does not fail, raise KnownFailureDidNotFailTest which
# is a normal exception. This may seem counter-intuitive
# but knowing when tests that _should_ fail don't can be useful
else:
raise KnownFailureDidNotFailTest()
# use `make_decorator` from nose to make sure that the meta-data on
# the function is forwarded properly (name, teardown, setup, etc)
return make_decorator(in_func)(inner_wrap)
# if the condition is false, don't make a wrapper function
# this is effectively a no-op
else:
return in_func
# return the decorator function
return dec
def skip_if(cond, msg=''):
"""
A decorator to skip a test if condition is met
"""
def dec(in_func):
if cond:
def wrapper():
raise nose.SkipTest(msg)
return make_decorator(in_func)(wrapper)
else:
return in_func
return dec
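# Usage sketch (illustrative, not part of the original module): how these
# decorator factories might be applied to nose-style test functions. The
# function names below are hypothetical and deliberately private so that they
# are not collected as real tests.
@skip_if(True, msg='skipped for demonstration purposes')
def _example_skipped_test():
    pass
@known_fail_if(True)
def _example_known_failing_test():
    raise AssertionError('this failure is expected, so it is reported as known')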
|
bsd-3-clause
|
fabioticconi/scikit-learn
|
examples/feature_selection/plot_select_from_model_boston.py
|
146
|
1527
|
"""
===================================================
Feature selection using SelectFromModel and LassoCV
===================================================
Use SelectFromModel meta-transformer along with Lasso to select the best
couple of features from the Boston dataset.
"""
# Author: Manoj Kumar <[email protected]>
# License: BSD 3 clause
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
# Load the boston dataset.
boston = load_boston()
X, y = boston['data'], boston['target']
# We use the base estimator LassoCV since the L1 norm promotes sparsity of features.
clf = LassoCV()
# Set a minimum threshold of 0.25
sfm = SelectFromModel(clf, threshold=0.25)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]
# Reset the threshold till the number of features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the metatransformer.
while n_features > 2:
sfm.threshold += 0.1
X_transform = sfm.transform(X)
n_features = X_transform.shape[1]
# Plot the selected two features from X.
plt.title(
"Features selected from Boston using SelectFromModel with "
"threshold %0.3f." % sfm.threshold)
feature1 = X_transform[:, 0]
feature2 = X_transform[:, 1]
plt.plot(feature1, feature2, 'r.')
plt.xlabel("Feature number 1")
plt.ylabel("Feature number 2")
plt.ylim([np.min(feature2), np.max(feature2)])
plt.show()
|
bsd-3-clause
|
Achuth17/scikit-learn
|
sklearn/ensemble/tests/test_partial_dependence.py
|
365
|
6996
|
"""
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
|
bsd-3-clause
|
phoebe-project/phoebe2-docs
|
development/tutorials/reflection_heating.py
|
2
|
4683
|
#!/usr/bin/env python
# coding: utf-8
# Reflection and Heating
# ============================
#
# For a comparison between "Horvat" and "Wilson" methods in the "irad_method" parameter, see the tutorial on [Lambert Scattering](./irrad_method_horvat.ipynb).
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# As always, let's do imports and initialize a logger and a new bundle.
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
#logger = phoebe.logger('error')
b = phoebe.default_binary()
# Relevant Parameters
# ---------------------------------
# The parameters that define reflection and heating are all prefaced by "irrad_frac" (fraction of incident flux) and suffixed by "bol" to indicate that they all refer to a bolometric (rather than passband-dependent) process. For this reason, they are *not* stored in the dataset, but rather directly in the component.
#
# Each of these parameters dictates how much incident flux will be handled by each of the available processes. For now these only include reflection (heating with immediate re-emission, without heat distribution) and lost flux. In the future, heating with distribution and scattering will also be supported.
#
# For each component, these parameters *must* add up to exactly 1.0 - and this is handled by a constraint which by default constrains the "lost" parameter.
# In[3]:
print(b['irrad_frac_refl_bol'])
# In[4]:
print(b['irrad_frac_lost_bol'])
# In[5]:
print(b['irrad_frac_refl_bol@primary'])
# In[6]:
print(b['irrad_frac_lost_bol@primary@component'])
# In order to see the effect of reflection, let's set "irrad_frac_refl_bol" of both of our stars to 0.9 - that is 90% of the incident flux will go towards reflection and 10% will be ignored.
# In[7]:
b.set_value_all('irrad_frac_refl_bol', 0.9)
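# A quick sanity check (illustrative): the three irradiation fractions are
# constrained to sum to exactly 1.0, so with reflection set to 0.9 the
# constrained "lost" fraction should now be reported as roughly 0.1.
print(b['irrad_frac_lost_bol@primary@component'])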
# Since reflection can be a computationally expensive process and in most cases is a low-order effect, there is a switch in the compute options that needs to be enabled in order for reflection to be taken into account. If this switch is False (which it is by default), the albedos are completely ignored and will be treated as if all incident light is lost/ignored.
# In[8]:
print(b['irrad_method@compute'])
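# Illustrative alternative (sketch): instead of passing irrad_method to
# run_compute further below, the switch could be flipped on the compute options
# directly, mirroring the twig assignments used elsewhere in this tutorial.
# Kept commented out so the tutorial flow is unchanged:
# b['irrad_method@compute'] = 'wilson'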
# Reflection has the most noticeable effect when the two stars are close to each other and have a large temperature ratio.
# In[9]:
b['sma@orbit'] = 4.0
# In[10]:
b['teff@primary'] = 10000
# In[11]:
b['teff@secondary'] = 5000
# Influence on Light Curves (fluxes)
# ---------------------------------
# In[12]:
b.add_dataset('lc', times=np.linspace(0,1,101))
# Let's run models with the reflection switch both turned on and off so that we can compare the two results. We'll also override ntriangles since the computation time required by irradiation depends largely on the number of surface elements.
# In[13]:
b.run_compute(irrad_method='none', ntriangles=700, model='refl_false')
# In[14]:
b.run_compute(irrad_method='wilson', ntriangles=700, model='refl_true')
# In[15]:
afig, mplfig = b.plot(show=True, legend=True)
# In[16]:
artists = plt.plot(b['value@times@refl_false'], b['value@fluxes@refl_true']-b['value@fluxes@refl_false'], 'r-')
# Influence on Meshes (Intensities)
# ------------------------------------------
# In[17]:
b.add_dataset('mesh', times=[0.2], columns=['teffs', 'intensities@lc01'])
# In[18]:
b.disable_dataset('lc01')
# In[19]:
b.run_compute(irrad_method='none', ntriangles=700, model='refl_false', overwrite=True)
# In[20]:
b.run_compute(irrad_method='wilson', ntriangles=700, model='refl_true', overwrite=True)
# In[21]:
#phoebe.logger('debug')
# In[22]:
afig, mplfig = b.plot(component='secondary', kind='mesh', model='refl_false',
fc='intensities', ec='face',
draw_sidebars=True, show=True)
# In[23]:
afig, mplfig = b.plot(component='secondary', kind='mesh', model='refl_true',
fc='intensities', ec='face',
draw_sidebars=True, show=True)
# In[24]:
afig, mplfig = b.plot(component='secondary', kind='mesh', model='refl_false',
fc='teffs', ec='face',
draw_sidebars=True, show=True)
# In[25]:
afig, mplfig = b.plot(component='secondary', kind='mesh', model='refl_true',
fc='teffs', ec='face',
draw_sidebars=True, show=True)
|
gpl-3.0
|
TomAugspurger/pandas
|
pandas/io/stata.py
|
1
|
126673
|
"""
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
https://www.statsmodels.org/devel/
"""
from collections import abc
import datetime
from io import BytesIO, IOBase
import os
from pathlib import Path
import struct
import sys
from typing import (
Any,
AnyStr,
BinaryIO,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
)
import warnings
from dateutil.relativedelta import relativedelta
import numpy as np
from pandas._libs.lib import infer_dtype
from pandas._libs.writers import max_len_string_array
from pandas._typing import FilePathOrBuffer, Label
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import (
ensure_object,
is_categorical_dtype,
is_datetime64_dtype,
)
from pandas import (
Categorical,
DatetimeIndex,
NaT,
Timestamp,
concat,
isna,
to_datetime,
to_timedelta,
)
from pandas.core.frame import DataFrame
from pandas.core.indexes.base import Index
from pandas.core.series import Series
from pandas.io.common import (
get_compression_method,
get_filepath_or_buffer,
get_handle,
infer_compression,
stringify_path,
)
_version_error = (
"Version of given Stata file is {version}. pandas supports importing "
"versions 105, 108, 111 (Stata 7SE), 113 (Stata 8/9), "
"114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16),"
"and 119 (Stata 15/16, over 32,767 variables)."
)
_statafile_processing_params1 = """\
convert_dates : bool, default True
Convert date variables to DataFrame time values.
convert_categoricals : bool, default True
Read value labels and convert columns to Categorical/Factor variables."""
_statafile_processing_params2 = """\
index_col : str, optional
Column to set as index.
convert_missing : bool, default False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nan.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : bool, default True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64).
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns.
order_categoricals : bool, default True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines."""
_iterator_params = """\
iterator : bool, default False
Return StataReader object."""
_reader_notes = """\
Notes
-----
Categorical variables read through an iterator may not have the same
categories and dtype. This occurs when a variable stored in a DTA
file is associated to an incomplete set of value labels that only
label a strict subset of the values."""
_read_stata_doc = f"""
Read Stata file into DataFrame.
Parameters
----------
filepath_or_buffer : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.dta``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_iterator_params}
Returns
-------
DataFrame or StataReader
See Also
--------
io.stata.StataReader : Low-level reader for Stata data files.
DataFrame.to_stata: Export Stata data files.
{_reader_notes}
Examples
--------
Read a Stata dta file:
>>> df = pd.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pd.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
... do_something(chunk)
"""
_read_method_doc = f"""\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
{_statafile_processing_params1}
{_statafile_processing_params2}
Returns
-------
DataFrame
"""
_stata_reader_doc = f"""\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or object
implementing a binary read() functions.
.. versionadded:: 0.23.0 support for pathlib, py.path.
{_statafile_processing_params1}
{_statafile_processing_params2}
{_chunksize_params}
{_reader_notes}
"""
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
# TODO: Add typing. As of January 2020 it is not possible to type this function since
# mypy doesn't understand that a Series and an int can be combined using mathematical
# operations. (+, -).
def _stata_elapsed_date_to_datetime_vec(dates, fmt) -> Series:
"""
Convert from SIF to datetime. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
        The format to convert to. Can be tc, td, tw, tm, tq, th, ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
        half-years since 1960h1
    yearly date - ty
years since 0000
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month) -> Series:
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas.
Otherwise it falls back to a slower but more robust method
using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format="%Y%m")
else:
index = getattr(year, "index", None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)], index=index
)
def convert_year_days_safe(year, days) -> Series:
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return to_datetime(year, format="%Y") + to_timedelta(days, unit="d")
else:
index = getattr(year, "index", None)
value = [
datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)
]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit) -> Series:
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, "index", None)
if unit == "d":
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == "ms":
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [
base + relativedelta(microseconds=(int(d) * 1000)) for d in deltas
]
return Series(values, index=index)
else:
raise ValueError("format not understood")
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt.startswith(("%tc", "tc")): # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, "ms")
elif fmt.startswith(("%tC", "tC")):
warnings.warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = NaT
return conv_dates
# Delta days relative to base
elif fmt.startswith(("%td", "td", "%d", "d")):
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, "d")
# does not count leap days - 7 days is a week.
# 52nd week may have more than 7 days
elif fmt.startswith(("%tw", "tw")):
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt.startswith(("%tm", "tm")): # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base
year = stata_epoch.year + dates // 4
quarter_month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, quarter_month)
elif fmt.startswith(("%th", "th")): # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt.startswith(("%ty", "ty")): # Years -- not delta
year = dates
first_month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, first_month)
else:
raise ValueError(f"Date fmt {fmt} not understood")
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series:
"""
Convert from datetime to SIF. https://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
        The format to convert to. Can be tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.dtype):
if delta:
time_delta = dates - stata_epoch
d["delta"] = time_delta._values.astype(np.int64) // 1000 # microseconds
if days or year:
date_index = DatetimeIndex(dates)
d["year"] = date_index.year
d["month"] = date_index.month
if days:
days_in_ns = dates.astype(np.int64) - to_datetime(
d["year"], format="%Y"
).astype(np.int64)
d["days"] = days_in_ns // NS_PER_DAY
elif infer_dtype(dates, skipna=False) == "datetime":
if delta:
delta = dates._values - stata_epoch
def f(x: datetime.timedelta) -> float:
return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d["delta"] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d["year"] = year_month._values // 100
d["month"] = year_month._values - d["year"] * 100
if days:
def g(x: datetime.datetime) -> int:
return (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(g)
d["days"] = v(dates)
else:
raise ValueError(
"Columns containing dates must contain either "
"datetime64, datetime.datetime or null values."
)
return DataFrame(d, index=index)
bad_loc = isna(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
warnings.warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError(f"Format {fmt} is not a known Stata date format")
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
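def _sif_days_sketch() -> Series:
    # Illustrative sketch, not part of the original module: 1960-01-02 is one
    # day after the Stata epoch, so its %td (elapsed days) representation is
    # expected to come back as 1.0.
    return _datetime_to_stata_elapsed_vec(Series([Timestamp("1960-01-02")]), "%td")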
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '{0}' does not satisfy this restriction. Use the
'version=117' parameter to write the newer (Stata 13 and later) format.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
class CategoricalConversionWarning(Warning):
pass
categorical_conversion_warning = """
One or more series with value labels are not fully labeled. Reading this
dataset with an iterator results in categorical variable with different
categories. This occurs since it is not possible to know all possible values
until the entire dataset has been read. To avoid this warning, you can either
read the dataset without an iterator, or manually convert categorical data by
setting ``convert_categoricals`` to False and then accessing the variable labels
through the value_labels method of the reader.
"""
def _cast_to_stata_types(data: DataFrame) -> DataFrame:
"""
Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
    int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and sidecast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
bool columns are cast to int8. uint columns are converted to int of the
same size if there is no loss in precision, otherwise are upcast to a
    larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
"""
ws = ""
# original, if small, if large
conversion_data = (
(np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64),
)
float32_max = struct.unpack("<f", b"\xff\xff\xff\x7e")[0]
float64_max = struct.unpack("<d", b"\xff\xff\xff\xff\xff\xff\xdf\x7f")[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc.format("uint64", "float64")
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if data[col].max() <= 2147483620 and data[col].min() >= -2147483647:
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -(2 ** 53):
ws = precision_loss_doc.format("int64", "float64")
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
raise ValueError(
f"Column {col} has a maximum value of infinity which is outside "
"the range supported by Stata."
)
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
raise ValueError(
f"Column {col} has a maximum value ({value}) outside the range "
f"supported by Stata ({float64_max})"
)
if ws:
warnings.warn(ws, PossiblePrecisionLoss)
return data
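def _cast_rules_sketch() -> DataFrame:
    # Illustrative sketch, not part of the original module: demonstrates the
    # casting rules documented above. The bool column should come back as int8,
    # and the small-valued int64 column should be downcast to int32.
    df = DataFrame(
        {
            "flag": np.array([True, False]),
            "big": np.array([1, 2], dtype=np.int64),
        }
    )
    return _cast_to_stata_types(df)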
class StataValueLabel:
"""
Parse a categorical column and prepare formatted output
Parameters
----------
catarray : Series
Categorical Series to encode
encoding : {"latin-1", "utf-8"}
Encoding to use for value labels.
"""
def __init__(self, catarray: Series, encoding: str = "latin-1"):
if encoding not in ("latin-1", "utf-8"):
raise ValueError("Only latin-1 and utf-8 are supported.")
self.labname = catarray.name
self._encoding = encoding
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = 0
self.off: List[int] = []
self.val: List[int] = []
self.txt: List[bytes] = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, str):
category = str(category)
warnings.warn(
value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch,
)
category = category.encode(encoding)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError(
"Stata value labels for a single variable must "
"have a combined length less than 32,000 characters."
)
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def generate_value_label(self, byteorder: str) -> bytes:
"""
Generate the binary representation of the value labels.
Parameters
----------
byteorder : str
Byte order of the output
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
encoding = self._encoding
bio = BytesIO()
null_byte = b"\x00"
# len
bio.write(struct.pack(byteorder + "i", self.len))
# labname
labname = str(self.labname)[:32].encode(encoding)
lab_len = 32 if encoding not in ("utf-8", "utf8") else 128
labname = _pad_bytes(labname, lab_len + 1)
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack("c", null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + "i", self.n))
# textlen - int32
bio.write(struct.pack(byteorder + "i", self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + "i", offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + "i", value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(text + null_byte)
bio.seek(0)
return bio.read()
class StataMissingValue:
"""
An observation's missing value.
Parameters
----------
value : {int, float}
The Stata missing value code
Notes
-----
More information: <https://www.stata.com/help.cgi?missing>
    Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES: Dict[float, str] = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[b] = "."
for i in range(1, 27):
MISSING_VALUES[i + b] = "." + chr(96 + i)
float32_base = b"\x00\x00\x00\x7f"
increment = struct.unpack("<i", b"\x00\x08\x00\x00")[0]
for i in range(27):
key = struct.unpack("<f", float32_base)[0]
MISSING_VALUES[key] = "."
if i > 0:
MISSING_VALUES[key] += chr(96 + i)
int_value = struct.unpack("<i", struct.pack("<f", key))[0] + increment
float32_base = struct.pack("<i", int_value)
float64_base = b"\x00\x00\x00\x00\x00\x00\xe0\x7f"
increment = struct.unpack("q", b"\x00\x00\x00\x00\x00\x01\x00\x00")[0]
for i in range(27):
key = struct.unpack("<d", float64_base)[0]
MISSING_VALUES[key] = "."
if i > 0:
MISSING_VALUES[key] += chr(96 + i)
int_value = struct.unpack("q", struct.pack("<d", key))[0] + increment
float64_base = struct.pack("q", int_value)
BASE_MISSING_VALUES = {
"int8": 101,
"int16": 32741,
"int32": 2147483621,
"float32": struct.unpack("<f", float32_base)[0],
"float64": struct.unpack("<d", float64_base)[0],
}
def __init__(self, value: Union[int, float]):
self._value = value
# Conversion to int to avoid hash issues on 32 bit platforms #8968
value = int(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
@property
def string(self) -> str:
"""
The Stata representation of the missing value: '.', '.a'..'.z'
Returns
-------
str
The representation of the missing value.
"""
return self._str
@property
def value(self) -> Union[int, float]:
"""
The binary representation of the missing value.
Returns
-------
{int, float}
The binary representation of the missing value.
"""
return self._value
def __str__(self) -> str:
return self.string
def __repr__(self) -> str:
return f"{type(self)}({self})"
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, type(self))
and self.string == other.string
and self.value == other.value
)
@classmethod
def get_base_missing_value(cls, dtype: np.dtype) -> Union[int, float]:
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES["int8"]
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES["int16"]
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES["int32"]
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES["float32"]
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES["float64"]
else:
raise ValueError("Unsupported dtype")
return value
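def _missing_value_sketch() -> str:
    # Illustrative sketch, not part of the original module: 101 is the base
    # int8 missing-value code, which maps to the Stata system-missing '.'.
    return str(StataMissingValue(101))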
class StataParser:
def __init__(self):
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = dict(
list(zip(range(1, 245), ["a" + str(i) for i in range(1, 245)]))
+ [
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64),
]
)
self.DTYPE_MAP_XML = dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8),
]
)
self.TYPE_MAP = list(range(251)) + list("bhlfd")
self.TYPE_MAP_XML = dict(
[
# Not really a Q, unclear how to handle byteswap
(32768, "Q"),
(65526, "d"),
(65527, "f"),
(65528, "l"),
(65529, "h"),
(65530, "b"),
]
)
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b"\xff\xff\xff\xfe"
float32_max = b"\xff\xff\xff\x7e"
float64_min = b"\xff\xff\xff\xff\xff\xff\xef\xff"
float64_max = b"\xff\xff\xff\xff\xff\xff\xdf\x7f"
self.VALID_RANGE = {
"b": (-127, 100),
"h": (-32767, 32740),
"l": (-2147483647, 2147483620),
"f": (
np.float32(struct.unpack("<f", float32_min)[0]),
np.float32(struct.unpack("<f", float32_max)[0]),
),
"d": (
np.float64(struct.unpack("<d", float64_min)[0]),
np.float64(struct.unpack("<d", float64_max)[0]),
),
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254, # float
100: 255, # double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
"b": 101,
"h": 32741,
"l": 2147483621,
"f": np.float32(struct.unpack("<f", b"\x00\x00\x00\x7f")[0]),
"d": np.float64(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
),
}
self.NUMPY_TYPE_MAP = {
"b": "i1",
"h": "i2",
"l": "i4",
"f": "f4",
"d": "f8",
"Q": "u8",
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = (
"aggregate",
"array",
"boolean",
"break",
"byte",
"case",
"catch",
"class",
"colvector",
"complex",
"const",
"continue",
"default",
"delegate",
"delete",
"do",
"double",
"else",
"eltypedef",
"end",
"enum",
"explicit",
"export",
"external",
"float",
"for",
"friend",
"function",
"global",
"goto",
"if",
"inline",
"int",
"local",
"long",
"NULL",
"pragma",
"protected",
"quad",
"rowvector",
"short",
"typedef",
"typename",
"virtual",
"_all",
"_N",
"_skip",
"_b",
"_pi",
"str#",
"in",
"_pred",
"strL",
"_coef",
"_rc",
"using",
"_cons",
"_se",
"with",
"_n",
)
class StataReader(StataParser, abc.Iterator):
__doc__ = _stata_reader_doc
def __init__(
self,
path_or_buf: FilePathOrBuffer,
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: Optional[str] = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Optional[Sequence[str]] = None,
order_categoricals: bool = True,
chunksize: Optional[int] = None,
):
super().__init__()
self.col_sizes: List[int] = []
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index_col = index_col
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = ""
self._chunksize = chunksize
if self._chunksize is not None and (
not isinstance(chunksize, int) or chunksize <= 0
):
raise ValueError("chunksize must be a positive integer when set.")
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
path_or_buf = stringify_path(path_or_buf)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _, should_close = get_filepath_or_buffer(path_or_buf)
if isinstance(path_or_buf, (str, bytes)):
self.path_or_buf = open(path_or_buf, "rb")
elif isinstance(path_or_buf, IOBase):
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
self.path_or_buf = BytesIO(contents)
self._read_header()
self._setup_dtype()
def __enter__(self) -> "StataReader":
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
""" exit context manager """
self.close()
def close(self) -> None:
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
def _set_encoding(self) -> None:
"""
Set string encoding which depends on file version
"""
if self.format_version < 118:
self._encoding = "latin-1"
else:
self._encoding = "utf-8"
def _read_header(self) -> None:
first_char = self.path_or_buf.read(1)
if struct.unpack("c", first_char)[0] == b"<":
self._read_new_header()
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = [self._calcsize(typ) for typ in self.typlist]
def _read_new_header(self) -> None:
# The first part of the header is common to 117 - 119.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118, 119]:
raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == b"MSF" and ">" or "<"
self.path_or_buf.read(15) # </byteorder><K>
nvar_type = "H" if self.format_version <= 118 else "I"
nvar_size = 2 if self.format_version <= 118 else 4
self.nvar = struct.unpack(
self.byteorder + nvar_type, self.path_or_buf.read(nvar_size)
)[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self._data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 16
)
self._seek_varnames = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_sortlist = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 10
)
self._seek_formats = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 9
)
self._seek_value_label_names = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 19
)
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 6
)
self.seek_strls = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 7
)
self.seek_value_labels = (
struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 14
)
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-119.
def _get_dtypes(
self, seek_vartypes: int
) -> Tuple[List[Union[int, str]], List[Union[int, np.dtype]]]:
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [
struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
for _ in range(self.nvar)
]
def f(typ: int) -> Union[int, str]:
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError as err:
raise ValueError(f"cannot convert stata types [{typ}]") from err
typlist = [f(x) for x in raw_typlist]
def g(typ: int) -> Union[str, np.dtype]:
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError as err:
raise ValueError(f"cannot convert stata dtype [{typ}]") from err
dtyplist = [g(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self) -> List[str]:
        # 33 in older formats, 129 in formats 118 and 119
b = 33 if self.format_version < 118 else 129
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self) -> List[str]:
if self.format_version >= 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
# Returns the label list
def _get_lbllist(self) -> List[str]:
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._decode(self.path_or_buf.read(b)) for _ in range(self.nvar)]
def _get_variable_labels(self) -> List[str]:
if self.format_version >= 118:
vlblist = [
self._decode(self.path_or_buf.read(321)) for _ in range(self.nvar)
]
elif self.format_version > 105:
vlblist = [
self._decode(self.path_or_buf.read(81)) for _ in range(self.nvar)
]
else:
vlblist = [
self._decode(self.path_or_buf.read(32)) for _ in range(self.nvar)
]
return vlblist
def _get_nobs(self) -> int:
if self.format_version >= 118:
return struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
def _get_data_label(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._decode(self.path_or_buf.read(81))
else:
return self._decode(self.path_or_buf.read(32))
def _get_time_stamp(self) -> str:
if self.format_version >= 118:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack("b", self.path_or_buf.read(1))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._decode(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self) -> int:
if self.format_version == 117:
self.path_or_buf.read(8) # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a workaround that uses the previous label section: 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version >= 118:
return struct.unpack(self.byteorder + "q", self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char: bytes) -> None:
self.format_version = struct.unpack("b", first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error.format(version=self.format_version))
self._set_encoding()
self.byteorder = (
struct.unpack("b", self.path_or_buf.read(1))[0] == 0x1 and ">" or "<"
)
self.filetype = struct.unpack("b", self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + "H", self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self._data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1)) for _ in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except ValueError as err:
invalid_types = ",".join(str(x) for x in typlist)
raise ValueError(f"cannot convert stata types [{invalid_types}]") from err
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except ValueError as err:
invalid_dtypes = ",".join(str(x) for x in typlist)
raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") from err
if self.format_version > 108:
self.varlist = [
self._decode(self.path_or_buf.read(33)) for _ in range(self.nvar)
]
else:
self.varlist = [
self._decode(self.path_or_buf.read(9)) for _ in range(self.nvar)
]
self.srtlist = struct.unpack(
self.byteorder + ("h" * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1)),
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(
self.byteorder + "b", self.path_or_buf.read(1)
)[0]
if self.format_version > 108:
data_len = struct.unpack(
self.byteorder + "i", self.path_or_buf.read(4)
)[0]
else:
data_len = struct.unpack(
self.byteorder + "h", self.path_or_buf.read(2)
)[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _setup_dtype(self) -> np.dtype:
"""Map between numpy and state dtypes"""
if self._dtype is not None:
return self._dtype
dtypes = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtypes.append(("s" + str(i), self.byteorder + self.NUMPY_TYPE_MAP[typ]))
else:
dtypes.append(("s" + str(i), "S" + str(typ)))
self._dtype = np.dtype(dtypes)
return self._dtype
def _calcsize(self, fmt: Union[int, str]) -> int:
if isinstance(fmt, int):
return fmt
return struct.calcsize(self.byteorder + fmt)
def _decode(self, s: bytes) -> str:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
try:
return s.decode(self._encoding)
except UnicodeDecodeError:
# GH 25960, fallback to handle incorrect format produced when 117
# files are converted to 118 files in Stata
encoding = self._encoding
msg = f"""
One or more strings in the dta file could not be decoded using {encoding}, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
warnings.warn(msg, UnicodeWarning)
return s.decode("latin-1")
def _read_value_labels(self) -> None:
if self._value_labels_read:
# Don't read twice
return
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
self._value_labels_read = True
self.value_label_dict: Dict[str, Dict[Union[float, int], str]] = {}
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
assert self._dtype is not None
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = {}
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b"</val":  # else these five bytes are b"<lbl>"
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._decode(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
off = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
val = np.frombuffer(
self.path_or_buf.read(4 * n), dtype=self.byteorder + "i4", count=n
)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
self.value_label_dict[labname][val[i]] = self._decode(txt[off[i] : end])
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self) -> None:
self.path_or_buf.seek(self.seek_strls)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO = {"0": ""}
while True:
if self.path_or_buf.read(3) != b"GSO":
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + "Q", self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on a little-endian file on a little-endian machine.
v_size = 2 if self.format_version == 118 else 3
if self.byteorder == "<":
buf = buf[0:v_size] + buf[4 : (12 - v_size)]
else:
# This path may not be correct, impossible to test
buf = buf[0:v_size] + buf[(4 + v_size) :]
v_o = struct.unpack("Q", buf)[0]
typ = struct.unpack("B", self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + "I", self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
decoded_va = va[0:-1].decode(self._encoding)
else:
# Stata says typ 129 can be binary, so use str
decoded_va = str(va)
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
self.GSO[str(v_o)] = decoded_va
def __next__(self) -> DataFrame:
if self._chunksize is None:
raise ValueError(
"chunksize must be set to a positive integer to use as an iterator."
)
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size: Optional[int] = None) -> DataFrame:
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, defaults to the reader's ``chunksize``
(or the whole file if no chunksize was set).
Returns
-------
DataFrame
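Examples
--------
A minimal sketch of chunked reading; the file name ``data.dta`` is
illustrative and assumed to exist on disk:
>>> reader = pd.read_stata("data.dta", chunksize=1000)  # doctest: +SKIP
>>> chunk = reader.get_chunk(500)  # doctest: +SKIP
>>> reader.close()  # doctest: +SKIP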
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
@Appender(_read_method_doc)
def read(
self,
nrows: Optional[int] = None,
convert_dates: Optional[bool] = None,
convert_categoricals: Optional[bool] = None,
index_col: Optional[str] = None,
convert_missing: Optional[bool] = None,
preserve_dtypes: Optional[bool] = None,
columns: Optional[Sequence[str]] = None,
order_categoricals: Optional[bool] = None,
) -> DataFrame:
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if index_col is None:
index_col = self._index_col
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (not self._value_labels_read):
self._can_read_value_labels = True
self._read_strls()
# Read data
assert self._dtype is not None
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(
self.path_or_buf.read(read_len), dtype=dtype, count=read_lines
)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist)
else:
data = DataFrame.from_records(data)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index_col is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(self._decode, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
ix = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], ix, self.dtyplist[i]))
)
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_dict(dict(data_formatted))
del data_formatted
data = self._do_convert_missing(data, convert_missing)
if convert_dates:
def any_startswith(x: str) -> bool:
return any(x.startswith(fmt) for fmt in _date_formats)
cols = np.where([any_startswith(x) for x in self.fmtlist])[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col], self.fmtlist[i]
)
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(
data, self.value_label_dict, self.lbllist, order_categoricals
)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_dict(dict(retyped_data))
if index_col is not None:
data = data.set_index(data.pop(index_col))
return data
def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame:
# Check for missing values, and replace if found
replacements = {}
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.nonzero(np.asarray(missing))[0]
umissing, umissing_loc = np.unique(series[missing], return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
replacements[colname] = replacement
if replacements:
columns = data.columns
replacement_df = DataFrame(replacements)
replaced = concat([data.drop(replacement_df.columns, 1), replacement_df], 1)
data = replaced[columns]
return data
def _insert_strls(self, data: DataFrame) -> DataFrame:
if not hasattr(self, "GSO") or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != "Q":
continue
# Wrap v_o in a string to allow uint64 values as keys on 32bit OS
data.iloc[:, i] = [self.GSO[str(k)] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame:
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError("columns contains duplicate entries")
unmatched = column_set.difference(data.columns)
if unmatched:
joined = ", ".join(list(unmatched))
raise ValueError(
"The following columns were not "
f"found in the Stata data set: {joined}"
)
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(
self,
data: DataFrame,
value_label_dict: Dict[str, Dict[Union[float, int], str]],
lbllist: Sequence[str],
order_categoricals: bool,
) -> DataFrame:
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(value_label_dict.keys())
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
vl = value_label_dict[label]
keys = np.array(list(vl.keys()))
column = data[col]
key_matches = column.isin(keys)
if self._chunksize is not None and key_matches.all():
initial_categories = keys
# If all categories are in the keys and we are iterating,
# use the same keys for all chunks. If some are missing
# value labels, then we will fall back to the categories
# varying across chunks.
else:
if self._chunksize is not None:
# warn if using an iterator
warnings.warn(
categorical_conversion_warning, CategoricalConversionWarning
)
initial_categories = None
cat_data = Categorical(
column, categories=initial_categories, ordered=order_categoricals
)
if initial_categories is None:
# If None here, then we need to match the cats in the Categorical
categories = []
for category in cat_data.categories:
if category in vl:
categories.append(vl[category])
else:
categories.append(category)
else:
# If all cats are matched, we can use the values
categories = list(vl.values())
try:
# Try to catch duplicate categories
cat_data.categories = categories
except ValueError as err:
vc = Series(categories).value_counts()
repeated_cats = list(vc.index[vc > 1])
repeats = "-" * 80 + "\n" + "\n".join(repeated_cats)
# GH 25772
msg = f"""
Value labels for column {col} are not unique. These cannot be converted to
pandas categoricals.
Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.
The repeated labels are:
{repeats}
"""
raise ValueError(msg) from err
# TODO: is the next line needed above in the data(...) method?
cat_series = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_series))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_dict(dict(cat_converted_data))
return data
@property
def data_label(self) -> str:
"""
Return data label of Stata file.
"""
return self._data_label
def variable_labels(self) -> Dict[str, str]:
"""
Return variable labels as a dict, associating each variable name
with corresponding label.
Returns
-------
dict
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self) -> Dict[str, Dict[Union[float, int], str]]:
"""
Return a dict associating each variable name with a dict that maps
each value to its corresponding label.
Returns
-------
dict
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
@Appender(_read_stata_doc)
def read_stata(
filepath_or_buffer: FilePathOrBuffer,
convert_dates: bool = True,
convert_categoricals: bool = True,
index_col: Optional[str] = None,
convert_missing: bool = False,
preserve_dtypes: bool = True,
columns: Optional[Sequence[str]] = None,
order_categoricals: bool = True,
chunksize: Optional[int] = None,
iterator: bool = False,
) -> Union[DataFrame, StataReader]:
reader = StataReader(
filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index_col=index_col,
convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize,
)
if iterator or chunksize:
return reader
try:
data = reader.read()
finally:
reader.close()
return data
def _open_file_binary_write(
fname: FilePathOrBuffer, compression: Union[str, Mapping[str, str], None],
) -> Tuple[BinaryIO, bool, Optional[Union[str, Mapping[str, str]]]]:
"""
Open a binary file or no-op if file-like.
Parameters
----------
fname : string path, path object or buffer
The file name or buffer.
compression : {str, dict, None}
The compression method to use.
Returns
-------
file : file-like object
File object supporting write
own : bool
True if the file was created, otherwise False
compression : str, dict or None
The compression argument, updated with the inferred compression method
"""
if hasattr(fname, "write"):
# See https://github.com/python/mypy/issues/1424 for hasattr challenges
return fname, False, None # type: ignore
elif isinstance(fname, (str, Path)):
# Extract compression mode as given, if dict
compression_typ, compression_args = get_compression_method(compression)
compression_typ = infer_compression(fname, compression_typ)
path_or_buf, _, compression_typ, _ = get_filepath_or_buffer(
fname, compression=compression_typ
)
if compression_typ is not None:
compression = compression_args
compression["method"] = compression_typ
else:
compression = None
f, _ = get_handle(path_or_buf, "wb", compression=compression, is_text=False)
return f, True, compression
else:
raise TypeError("fname must be a binary file, buffer or path-like.")
def _set_endianness(endianness: str) -> str:
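"""
Normalize a byteorder specification to "<" (little) or ">" (big).
A quick illustration:
>>> _set_endianness("little")
'<'
>>> _set_endianness("big")
'>'
"""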
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError(f"Endianness {endianness} not understood")
def _pad_bytes(name: AnyStr, length: int) -> AnyStr:
"""
Take a char string and pad it with null bytes until it is `length` characters long.
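A minimal illustration of the padding (the result is always exactly
`length` characters long):
>>> len(_pad_bytes("ab", 4))
4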
"""
if isinstance(name, bytes):
return name + b"\x00" * (length - len(name))
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt: str) -> np.dtype:
"""
Convert from one of the stata date formats to a type in TYPE_MAP.
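For instance, every supported SIF format maps to ``np.float64``:
>>> _convert_datetime_to_stata_type("%tc") is np.float64
True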
"""
if fmt in [
"tc",
"%tc",
"td",
"%td",
"tw",
"%tw",
"tm",
"%tm",
"tq",
"%tq",
"th",
"%th",
"ty",
"%ty",
]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError(f"Format {fmt} not implemented")
def _maybe_convert_to_int_keys(convert_dates: Dict, varlist: List[Label]) -> Dict:
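"""
Normalize the convert_dates mapping: ensure formats start with "%" and
map column labels to their integer positions in varlist.
A quick illustration (the column names are only for the example):
>>> _maybe_convert_to_int_keys({"d": "tc"}, ["a", "d"])
{1: '%tc'}
"""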
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int:
"""
Convert numpy dtypes to Stata type codes. Returns the type descriptor byte for the given column.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - for int8 byte
252 - for int16 int
253 - for int32 long
254 - for float32 float
255 - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
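A couple of quick illustrations, using the module-level ``np`` and
``Series`` imports:
>>> _dtype_to_stata_type(np.dtype(np.float64), Series([1.0]))
255
>>> _dtype_to_stata_type(np.dtype(np.int8), Series([1]))
251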
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column._values))
return max(itemsize, 1)
elif dtype == np.float64:
return 255
elif dtype == np.float32:
return 254
elif dtype == np.int32:
return 253
elif dtype == np.int16:
return 252
elif dtype == np.int8:
return 251
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
def _dtype_to_default_stata_fmt(
dtype, column: Series, dta_version: int = 114, force_strl: bool = False
) -> str:
"""
Map numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
strl -> "%9s"
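For example:
>>> _dtype_to_default_stata_fmt(np.dtype(np.float64), Series([1.0]))
'%10.0g'
>>> _dtype_to_default_stata_fmt(np.dtype(object), Series(["ab", "abc"]))
'%3s'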
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dta_version < 117:
max_str_len = 244
else:
max_str_len = 2045
if force_strl:
return "%9s"
if dtype.type == np.object_:
itemsize = max_len_string_array(ensure_object(column._values))
if itemsize > max_str_len:
if dta_version >= 117:
return "%9s"
else:
raise ValueError(excessive_string_length_error.format(column.name))
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
is written.
.. versionadded:: 0.23.0 support for pathlib, py.path.
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
compression : str or dict, default 'infer'
For on-the-fly compression of the output dta. If string, specifies
compression mode. If dict, value at key 'method' specifies compression
mode. Compression mode must be one of {'infer', 'gzip', 'bz2', 'zip',
'xz', None}. If compression mode is 'infer' and `fname` is path-like,
then detect compression from the following extensions: '.gz', '.bz2',
'.zip', or '.xz' (otherwise no compression). If dict and compression
mode is one of {'zip', 'gzip', 'bz2'}, or inferred as one of the above,
other entries passed as additional compression options.
.. versionadded:: 1.1.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Directly write a zip file
>>> compression = {"method": "zip", "archive_name": "data_file.dta"}
>>> writer = StataWriter('./data_file.zip', data, compression=compression)
>>> writer.write_file()
Save a DataFrame with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
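Or attach variable labels (the file name and label text are illustrative)
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./labeled_data_file.dta', data,
...                      variable_labels={'a': 'First column'})
>>> writer.write_file()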
"""
_max_string_length = 244
_encoding = "latin-1"
def __init__(
self,
fname: FilePathOrBuffer,
data: DataFrame,
convert_dates: Optional[Dict[Label, str]] = None,
write_index: bool = True,
byteorder: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
data_label: Optional[str] = None,
variable_labels: Optional[Dict[Label, str]] = None,
compression: Union[str, Mapping[str, str], None] = "infer",
):
super().__init__()
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
self._own_file = True
self._compression = compression
self._output_file: Optional[BinaryIO] = None
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = stringify_path(fname)
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
self._converted_names: Dict[Label, str] = {}
self._file: Optional[BinaryIO] = None
def _write(self, to_write: str) -> None:
"""
Helper to call encode before writing to file for Python 3 compat.
"""
assert self._file is not None
self._file.write(to_write.encode(self._encoding))
def _write_bytes(self, value: bytes) -> None:
"""
Helper to assert file is open before writing.
"""
assert self._file is not None
self._file.write(value)
def _prepare_categoricals(self, data: DataFrame) -> DataFrame:
"""
Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int
"""
is_cat = [is_categorical_dtype(data[col].dtype) for col in data]
self._is_col_cat = is_cat
self._value_labels: List[StataValueLabel] = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
svl = StataValueLabel(data[col], encoding=self._encoding)
self._value_labels.append(svl)
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError(
"It is not possible to export "
"int64-based categorical data to Stata."
)
values = data[col].cat.codes._values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_dict(dict(data_formatted))
def _replace_nans(self, data: DataFrame) -> DataFrame:
"""
Checks floating point data columns for nans, and replaces these with
the generic Stata missing value (.)
"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES["f"]
else:
replacement = self.MISSING_VALUES["d"]
data[c] = data[c].fillna(replacement)
return data
def _update_strl_names(self) -> None:
"""No-op, forward compatibility"""
pass
def _validate_variable_name(self, name: str) -> str:
"""
Validate variable names for Stata export.
Parameters
----------
name : str
Variable name
Returns
-------
str
The validated name with invalid characters replaced with
underscores.
Notes
-----
Stata 114 and 117 support ascii characters in a-z, A-Z, 0-9
and _.
"""
for c in name:
if (
(c < "A" or c > "Z")
and (c < "a" or c > "z")
and (c < "0" or c > "9")
and c != "_"
):
name = name.replace(c, "_")
return name
def _check_column_names(self, data: DataFrame) -> DataFrame:
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names: Dict[Label, str] = {}
columns: List[Label] = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, str):
name = str(name)
name = self._validate_variable_name(name)
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = "_" + name
# Variable name may not start with a number
if "0" <= name[0] <= "9":
name = "_" + name
name = name[: min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = "_" + str(duplicate_var_id) + name
name = name[: min(len(name), 32)]
duplicate_var_id += 1
converted_names[orig_name] = name
columns[j] = name
data.columns = Index(columns)
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
conversion_warning = []
for orig_name, name in converted_names.items():
msg = f"{orig_name} -> {name}"
conversion_warning.append(msg)
ws = invalid_name_doc.format("\n ".join(conversion_warning))
warnings.warn(ws, InvalidColumnName)
self._converted_names = converted_names
self._update_strl_names()
return data
def _set_formats_and_types(self, dtypes: Series) -> None:
self.fmtlist: List[str] = []
self.typlist: List[int] = []
for col, dtype in dtypes.items():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, self.data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, self.data[col]))
def _prepare_pandas(self, data: DataFrame) -> None:
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
temp = data.reset_index()
if isinstance(temp, DataFrame):
data = temp
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = "tc"
self._convert_dates = _maybe_convert_to_int_keys(
self._convert_dates, self.varlist
)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(self._convert_dates[key])
dtypes[key] = np.dtype(new_type)
# Verify object arrays are strings and encode to bytes
self._encode_strings()
self._set_formats_and_types(dtypes)
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
if isinstance(key, int):
self.fmtlist[key] = self._convert_dates[key]
def _encode_strings(self) -> None:
"""
Encode strings in dta-specific encoding
Do not encode columns marked for date conversion or for strL
conversion. The strL converter independently handles conversion and
also accepts empty string arrays.
"""
convert_dates = self._convert_dates
# _convert_strl is not available in dta 114
convert_strl = getattr(self, "_convert_strl", [])
for i, col in enumerate(self.data):
# Skip columns marked for date conversion or strl conversion
if i in convert_dates or col in convert_strl:
continue
column = self.data[col]
dtype = column.dtype
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column, skipna=True)
if not ((inferred_dtype == "string") or len(column) == 0):
col = column.name
raise ValueError(
f"""\
Column `{col}` cannot be exported.\n\nOnly string-like object arrays
containing all strings or a mix of strings and None can be exported.
Object arrays containing only null values are prohibited. Other object
types cannot be exported and must first be converted to one of the
supported types."""
)
encoded = self.data[col].str.encode(self._encoding)
# If larger than _max_string_length do nothing
if (
max_len_string_array(ensure_object(encoded._values))
<= self._max_string_length
):
self.data[col] = encoded
def write_file(self) -> None:
self._file, self._own_file, compression = _open_file_binary_write(
self._fname, self._compression
)
if compression is not None:
self._output_file = self._file
self._file = BytesIO()
try:
self._write_header(data_label=self._data_label, time_stamp=self._time_stamp)
self._write_map()
self._write_variable_types()
self._write_varnames()
self._write_sortlist()
self._write_formats()
self._write_value_label_names()
self._write_variable_labels()
self._write_expansion_fields()
self._write_characteristics()
records = self._prepare_data()
self._write_data(records)
self._write_strls()
self._write_value_labels()
self._write_file_close_tag()
self._write_map()
except Exception as exc:
self._close()
if self._own_file:
try:
if isinstance(self._fname, (str, Path)):
os.unlink(self._fname)
except OSError:
warnings.warn(
f"This save was not successful but {self._fname} could not "
"be deleted. This file is not valid.",
ResourceWarning,
)
raise exc
else:
self._close()
def _close(self) -> None:
"""
Close the file if it was created by the writer.
If a buffer or file-like object was passed in, for example a GzipFile,
then leave this file open for the caller to close. In either case,
attempt to flush the file contents to ensure they are written to disk
(if supported)
"""
# Some file-like objects might not support flush
assert self._file is not None
if self._output_file is not None:
assert isinstance(self._file, BytesIO)
bio = self._file
bio.seek(0)
self._file = self._output_file
self._file.write(bio.read())
try:
self._file.flush()
except AttributeError:
pass
if self._own_file:
self._file.close()
def _write_map(self) -> None:
"""No-op, future compatibility"""
pass
def _write_file_close_tag(self) -> None:
"""No-op, future compatibility"""
pass
def _write_characteristics(self) -> None:
"""No-op, future compatibility"""
pass
def _write_strls(self) -> None:
"""No-op, future compatibility"""
pass
def _write_expansion_fields(self) -> None:
"""Write 5 zeros for expansion fields"""
self._write(_pad_bytes("", 5))
def _write_value_labels(self) -> None:
for vl in self._value_labels:
self._write_bytes(vl.generate_value_label(self._byteorder))
def _write_header(
self,
data_label: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
) -> None:
byteorder = self._byteorder
# ds_format - just use 114
self._write_bytes(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._write_bytes(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._write_bytes(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._write_bytes(self._null_terminate_bytes(_pad_bytes("", 80)))
else:
self._write_bytes(
self._null_terminate_bytes(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# GH #13856
# Avoid locale-specific month conversion
months = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (
time_stamp.strftime("%d ")
+ month_lookup[time_stamp.month]
+ time_stamp.strftime(" %Y %H:%M")
)
self._write_bytes(self._null_terminate_bytes(ts))
def _write_variable_types(self) -> None:
for typ in self.typlist:
self._write_bytes(struct.pack("B", typ))
def _write_varnames(self) -> None:
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate_str(name)
name = _pad_bytes(name[:32], 33)
self._write(name)
def _write_sortlist(self) -> None:
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (self.nvar + 1))
self._write(srtlist)
def _write_formats(self) -> None:
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
def _write_value_label_names(self) -> None:
# lbllist, 33*nvar, char array
for i in range(self.nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate_str(name)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self) -> None:
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes("", 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError("Variable labels must be 80 characters or fewer")
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError(
"Variable labels must contain only characters that "
"can be encoded in Latin-1"
)
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _convert_strls(self, data: DataFrame) -> DataFrame:
"""No-op, future compatibility"""
return data
def _prepare_data(self) -> np.recarray:
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(
data[col], self.fmtlist[i]
)
# 2. Convert strls
data = self._convert_strls(data)
# 3. Convert bad string data to '' and pad to correct length
dtypes = {}
native_byteorder = self._byteorder == _set_endianness(sys.byteorder)
for i, col in enumerate(data):
typ = typlist[i]
if typ <= self._max_string_length:
data[col] = data[col].fillna("").apply(_pad_bytes, args=(typ,))
stype = f"S{typ}"
dtypes[col] = stype
data[col] = data[col].astype(stype)
else:
dtype = data[col].dtype
if not native_byteorder:
dtype = dtype.newbyteorder(self._byteorder)
dtypes[col] = dtype
return data.to_records(index=False, column_dtypes=dtypes)
def _write_data(self, records: np.recarray) -> None:
self._write_bytes(records.tobytes())
@staticmethod
def _null_terminate_str(s: str) -> str:
s += "\x00"
return s
def _null_terminate_bytes(self, s: str) -> bytes:
return self._null_terminate_str(s).encode(self._encoding)
def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) -> int:
"""
Converts numpy dtypes to Stata 117 type codes. Returns the numeric type code for the given column.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 2045 are strings of this length
Pandas Stata
32768 - for object strL
65526 - for int8 byte
65527 - for int16 int
65528 - for int32 long
65529 - for float32 float
65530 - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if force_strl:
return 32768
if dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column._values))
itemsize = max(itemsize, 1)
if itemsize <= 2045:
return itemsize
return 32768
elif dtype == np.float64:
return 65526
elif dtype == np.float32:
return 65527
elif dtype == np.int32:
return 65528
elif dtype == np.int16:
return 65529
elif dtype == np.int8:
return 65530
else: # pragma : no cover
raise NotImplementedError(f"Data type {dtype} not supported.")
def _pad_bytes_new(name: Union[str, bytes], length: int) -> bytes:
"""
Takes a str or bytes instance and pads it with null bytes until it is `length` bytes long.
"""
if isinstance(name, str):
name = bytes(name, "utf-8")
return name + b"\x00" * (length - len(name))
class StataStrLWriter:
"""
Converter for Stata StrLs
Stata StrLs map 8 byte values to strings which are stored using a
dictionary-like format where strings are keyed to two values.
Parameters
----------
df : DataFrame
DataFrame to convert
columns : Sequence[str]
List of columns names to convert to StrL
version : int, optional
dta version. Currently supports 117, 118 and 119
byteorder : str, optional
Can be ">", "<", "little", or "big". default is `sys.byteorder`
Notes
-----
Supports creation of the StrL block of a dta file for dta versions
117, 118 and 119. These differ in how the GSO is stored. 118 and
119 store the GSO lookup value as a uint32 and a uint64, while 117
uses two uint32s. 118 and 119 also encode all strings as unicode
which is required by the format. 117 uses 'latin-1', a fixed-width
encoding that extends the 7-bit ASCII table with an additional 128
characters.
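Examples
--------
A minimal sketch of the intended use; the column name ``strls`` is
illustrative only:
>>> df = DataFrame({"strls": ["a", "bb", "a"]})
>>> ssw = StataStrLWriter(df, columns=["strls"])
>>> table, converted = ssw.generate_table()
>>> blob = ssw.generate_blob(table)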
"""
def __init__(
self,
df: DataFrame,
columns: Sequence[str],
version: int = 117,
byteorder: Optional[str] = None,
):
if version not in (117, 118, 119):
raise ValueError("Only dta versions 117, 118 and 119 supported")
self._dta_ver = version
self.df = df
self.columns = columns
self._gso_table = {"": (0, 0)}
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
gso_v_type = "I" # uint32
gso_o_type = "Q" # uint64
self._encoding = "utf-8"
if version == 117:
o_size = 4
gso_o_type = "I" # 117 used uint32
self._encoding = "latin-1"
elif version == 118:
o_size = 6
else: # version == 119
o_size = 5
self._o_offset = 2 ** (8 * (8 - o_size))
self._gso_o_type = gso_o_type
self._gso_v_type = gso_v_type
def _convert_key(self, key: Tuple[int, int]) -> int:
v, o = key
return v + self._o_offset * o
def generate_table(self) -> Tuple[Dict[str, Tuple[int, int]], DataFrame]:
"""
Generates the GSO lookup table for the DataFrame
Returns
-------
gso_table : dict
Ordered dictionary using the strings found as keys
and their lookup position (v,o) as values
gso_df : DataFrame
DataFrame where strl columns have been converted to
(v,o) values
Notes
-----
Modifies the DataFrame in-place.
The DataFrame returned encodes the (v,o) values as uint64s. The
encoding depends on the dta version, and can be expressed as
enc = v + o * 2 ** ((8 - o_size) * 8)
so that v is stored in the lower (8 - o_size) bytes and o is in the
upper bytes. o_size is
* 117: 4
* 118: 6
* 119: 5
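As a worked example, in a 117 file (o_size = 4) the pair (v=1, o=2) is
encoded as 1 + 2 * 2 ** 32 == 8589934593.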
"""
gso_table = self._gso_table
gso_df = self.df
columns = list(gso_df.columns)
selected = gso_df[self.columns]
col_index = [(col, columns.index(col)) for col in self.columns]
keys = np.empty(selected.shape, dtype=np.uint64)
for o, (idx, row) in enumerate(selected.iterrows()):
for j, (col, v) in enumerate(col_index):
val = row[col]
# Allow columns with mixed str and None (GH 23633)
val = "" if val is None else val
key = gso_table.get(val, None)
if key is None:
# Stata prefers human numbers
key = (v + 1, o + 1)
gso_table[val] = key
keys[o, j] = self._convert_key(key)
for i, col in enumerate(self.columns):
gso_df[col] = keys[:, i]
return gso_table, gso_df
def generate_blob(self, gso_table: Dict[str, Tuple[int, int]]) -> bytes:
"""
Generates the binary blob of GSOs that is written to the dta file.
Parameters
----------
gso_table : dict
Ordered dictionary (str, vo)
Returns
-------
gso : bytes
Binary content of dta file to be placed between strl tags
Notes
-----
Output format depends on dta version. 117 uses two uint32s to
express v and o while 118+ uses a uint32 for v and a uint64 for o.
"""
# Format information
# Length includes null term
# 117
# GSOvvvvooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u4 u1 u4 string + null term
#
# 118, 119
# GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u8 u1 u4 string + null term
bio = BytesIO()
gso = bytes("GSO", "ascii")
gso_type = struct.pack(self._byteorder + "B", 130)
null = struct.pack(self._byteorder + "B", 0)
v_type = self._byteorder + self._gso_v_type
o_type = self._byteorder + self._gso_o_type
len_type = self._byteorder + "I"
for strl, vo in gso_table.items():
if vo == (0, 0):
continue
v, o = vo
# GSO
bio.write(gso)
# vvvv
bio.write(struct.pack(v_type, v))
# oooo / oooooooo
bio.write(struct.pack(o_type, o))
# t
bio.write(gso_type)
# llll
utf8_string = bytes(strl, "utf-8")
bio.write(struct.pack(len_type, len(utf8_string) + 1))
# xxx...xxx
bio.write(utf8_string)
bio.write(null)
bio.seek(0)
return bio.read()
class StataWriter117(StataWriter):
"""
A class for writing Stata binary dta files in Stata 13 format (117)
.. versionadded:: 0.23.0
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
is written.
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
convert_strl : list
List of columns names to convert to Stata StrL format. Columns with
more than 2045 characters are automatically written as StrL.
Smaller columns can be converted by including the column name. Using
StrLs can reduce output file size when strings are longer than 8
characters, and either frequently repeated or sparse.
compression : str or dict, default 'infer'
For on-the-fly compression of the output dta. If string, specifies
compression mode. If dict, value at key 'method' specifies compression
mode. Compression mode must be one of {'infer', 'gzip', 'bz2', 'zip',
'xz', None}. If compression mode is 'infer' and `fname` is path-like,
then detect compression from the following extensions: '.gz', '.bz2',
'.zip', or '.xz' (otherwise no compression). If dict and compression
mode is one of {'zip', 'gzip', 'bz2'}, or inferred as one of the above,
other entries passed as additional compression options.
.. versionadded:: 1.1.0
Returns
-------
writer : StataWriter117 instance
The StataWriter117 instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> from pandas.io.stata import StataWriter117
>>> data = pd.DataFrame([[1.0, 1, 'a']], columns=['a', 'b', 'c'])
>>> writer = StataWriter117('./data_file.dta', data)
>>> writer.write_file()
Directly write a zip file
>>> compression = {"method": "zip", "archive_name": "data_file.dta"}
>>> writer = StataWriter117('./data_file.zip', data, compression=compression)
>>> writer.write_file()
Or with long strings stored in strl format
>>> data = pd.DataFrame([['A relatively long string'], [''], ['']],
... columns=['strls'])
>>> writer = StataWriter117('./data_file_with_long_strings.dta', data,
... convert_strl=['strls'])
>>> writer.write_file()
"""
_max_string_length = 2045
_dta_version = 117
def __init__(
self,
fname: FilePathOrBuffer,
data: DataFrame,
convert_dates: Optional[Dict[Label, str]] = None,
write_index: bool = True,
byteorder: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
data_label: Optional[str] = None,
variable_labels: Optional[Dict[Label, str]] = None,
convert_strl: Optional[Sequence[Label]] = None,
compression: Union[str, Mapping[str, str], None] = "infer",
):
# Copy to new list since convert_strl might be modified later
self._convert_strl: List[Label] = []
if convert_strl is not None:
self._convert_strl.extend(convert_strl)
super().__init__(
fname,
data,
convert_dates,
write_index,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
variable_labels=variable_labels,
compression=compression,
)
self._map: Dict[str, int] = {}
self._strl_blob = b""
@staticmethod
def _tag(val: Union[str, bytes], tag: str) -> bytes:
"""Surround val with <tag></tag>"""
if isinstance(val, str):
val = bytes(val, "utf-8")
return bytes("<" + tag + ">", "utf-8") + val + bytes("</" + tag + ">", "utf-8")
def _update_map(self, tag: str) -> None:
"""Update map location for tag with file position"""
assert self._file is not None
self._map[tag] = self._file.tell()
def _write_header(
self,
data_label: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
) -> None:
"""Write the file header"""
byteorder = self._byteorder
self._write_bytes(bytes("<stata_dta>", "utf-8"))
bio = BytesIO()
# ds_format - 117
bio.write(self._tag(bytes(str(self._dta_version), "utf-8"), "release"))
# byteorder
bio.write(self._tag(byteorder == ">" and "MSF" or "LSF", "byteorder"))
# number of vars, 2 bytes in 117 and 118, 4 bytes in 119
nvar_type = "H" if self._dta_version <= 118 else "I"
bio.write(self._tag(struct.pack(byteorder + nvar_type, self.nvar), "K"))
# 117 uses 4 bytes, 118 uses 8
nobs_size = "I" if self._dta_version == 117 else "Q"
bio.write(self._tag(struct.pack(byteorder + nobs_size, self.nobs), "N"))
# data label 81 bytes, char, null terminated
label = data_label[:80] if data_label is not None else ""
encoded_label = label.encode(self._encoding)
label_size = "B" if self._dta_version == 117 else "H"
label_len = struct.pack(byteorder + label_size, len(encoded_label))
encoded_label = label_len + encoded_label
bio.write(self._tag(encoded_label, "label"))
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# Avoid locale-specific month conversion
months = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (
time_stamp.strftime("%d ")
+ month_lookup[time_stamp.month]
+ time_stamp.strftime(" %Y %H:%M")
)
# '\x11' added due to inspection of Stata file
stata_ts = b"\x11" + bytes(ts, "utf-8")
bio.write(self._tag(stata_ts, "timestamp"))
bio.seek(0)
self._write_bytes(self._tag(bio.read(), "header"))
def _write_map(self) -> None:
"""
Called twice during file write. The first populates the values in
the map with 0s. The second call writes the final map locations when
all blocks have been written.
"""
assert self._file is not None
if not self._map:
self._map = dict(
(
("stata_data", 0),
("map", self._file.tell()),
("variable_types", 0),
("varnames", 0),
("sortlist", 0),
("formats", 0),
("value_label_names", 0),
("variable_labels", 0),
("characteristics", 0),
("data", 0),
("strls", 0),
("value_labels", 0),
("stata_data_close", 0),
("end-of-file", 0),
)
)
# Move to start of map
self._file.seek(self._map["map"])
bio = BytesIO()
for val in self._map.values():
bio.write(struct.pack(self._byteorder + "Q", val))
bio.seek(0)
self._write_bytes(self._tag(bio.read(), "map"))
def _write_variable_types(self) -> None:
self._update_map("variable_types")
bio = BytesIO()
for typ in self.typlist:
bio.write(struct.pack(self._byteorder + "H", typ))
bio.seek(0)
self._write_bytes(self._tag(bio.read(), "variable_types"))
def _write_varnames(self) -> None:
self._update_map("varnames")
bio = BytesIO()
# 118 scales by 4 to accommodate utf-8 data worst case encoding
vn_len = 32 if self._dta_version == 117 else 128
for name in self.varlist:
name = self._null_terminate_str(name)
name = _pad_bytes_new(name[:32].encode(self._encoding), vn_len + 1)
bio.write(name)
bio.seek(0)
self._write_bytes(self._tag(bio.read(), "varnames"))
def _write_sortlist(self) -> None:
self._update_map("sortlist")
sort_size = 2 if self._dta_version < 119 else 4
self._write_bytes(self._tag(b"\x00" * sort_size * (self.nvar + 1), "sortlist"))
def _write_formats(self) -> None:
self._update_map("formats")
bio = BytesIO()
fmt_len = 49 if self._dta_version == 117 else 57
for fmt in self.fmtlist:
bio.write(_pad_bytes_new(fmt.encode(self._encoding), fmt_len))
bio.seek(0)
self._write_bytes(self._tag(bio.read(), "formats"))
def _write_value_label_names(self) -> None:
self._update_map("value_label_names")
bio = BytesIO()
# 118 scales by 4 to accommodate utf-8 data worst case encoding
vl_len = 32 if self._dta_version == 117 else 128
for i in range(self.nvar):
# Use variable name when categorical
name = "" # default name
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate_str(name)
encoded_name = _pad_bytes_new(name[:32].encode(self._encoding), vl_len + 1)
bio.write(encoded_name)
bio.seek(0)
self._write_bytes(self._tag(bio.read(), "value_label_names"))
def _write_variable_labels(self) -> None:
# Missing labels are 80 blank characters plus null termination
self._update_map("variable_labels")
bio = BytesIO()
# 118 scales by 4 to accommodate utf-8 data worst case encoding
vl_len = 80 if self._dta_version == 117 else 320
blank = _pad_bytes_new("", vl_len + 1)
if self._variable_labels is None:
for _ in range(self.nvar):
bio.write(blank)
bio.seek(0)
self._write_bytes(self._tag(bio.read(), "variable_labels"))
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError("Variable labels must be 80 characters or fewer")
try:
encoded = label.encode(self._encoding)
except UnicodeEncodeError as err:
raise ValueError(
"Variable labels must contain only characters that "
f"can be encoded in {self._encoding}"
) from err
bio.write(_pad_bytes_new(encoded, vl_len + 1))
else:
bio.write(blank)
bio.seek(0)
self._write_bytes(self._tag(bio.read(), "variable_labels"))
def _write_characteristics(self) -> None:
self._update_map("characteristics")
self._write_bytes(self._tag(b"", "characteristics"))
def _write_data(self, records) -> None:
self._update_map("data")
self._write_bytes(b"<data>")
self._write_bytes(records.tobytes())
self._write_bytes(b"</data>")
def _write_strls(self) -> None:
self._update_map("strls")
self._write_bytes(self._tag(self._strl_blob, "strls"))
def _write_expansion_fields(self) -> None:
"""No-op in dta 117+"""
pass
def _write_value_labels(self) -> None:
self._update_map("value_labels")
bio = BytesIO()
for vl in self._value_labels:
lab = vl.generate_value_label(self._byteorder)
lab = self._tag(lab, "lbl")
bio.write(lab)
bio.seek(0)
self._write_bytes(self._tag(bio.read(), "value_labels"))
def _write_file_close_tag(self) -> None:
self._update_map("stata_data_close")
self._write_bytes(bytes("</stata_dta>", "utf-8"))
self._update_map("end-of-file")
def _update_strl_names(self) -> None:
"""
Update column names for conversion to strl if they might have been
changed to comply with Stata naming rules
"""
# Update convert_strl if names changed
for orig, new in self._converted_names.items():
if orig in self._convert_strl:
idx = self._convert_strl.index(orig)
self._convert_strl[idx] = new
def _convert_strls(self, data: DataFrame) -> DataFrame:
"""
Convert columns to StrLs if either very large or in the
convert_strl variable
"""
convert_cols = [
col
for i, col in enumerate(data)
if self.typlist[i] == 32768 or col in self._convert_strl
]
if convert_cols:
ssw = StataStrLWriter(data, convert_cols, version=self._dta_version)
tab, new_data = ssw.generate_table()
data = new_data
self._strl_blob = ssw.generate_blob(tab)
return data
def _set_formats_and_types(self, dtypes: Series) -> None:
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.items():
force_strl = col in self._convert_strl
fmt = _dtype_to_default_stata_fmt(
dtype,
self.data[col],
dta_version=self._dta_version,
force_strl=force_strl,
)
self.fmtlist.append(fmt)
self.typlist.append(
_dtype_to_stata_type_117(dtype, self.data[col], force_strl)
)
class StataWriterUTF8(StataWriter117):
"""
Stata binary dta file writing in Stata 15 (118) and 16 (119) formats
    DTA 118 and 119 format files support Unicode string data (both fixed-width
    str and strL). Unicode is also supported in value labels, variable
labels and the dataset label. Format 119 is automatically used if the
file contains more than 32,767 variables.
.. versionadded:: 1.0.0
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
        object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
is written.
data : DataFrame
Input to save
convert_dates : dict, default None
Dictionary mapping columns containing datetime types to stata internal
format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool, default True
Write the index to Stata dataset.
byteorder : str, default None
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime, default None
A datetime to use as file creation date. Default is the current time
data_label : str, default None
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict, default None
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
convert_strl : list, default None
List of columns names to convert to Stata StrL format. Columns with
more than 2045 characters are automatically written as StrL.
Smaller columns can be converted by including the column name. Using
StrLs can reduce output file size when strings are longer than 8
characters, and either frequently repeated or sparse.
version : int, default None
The dta version to use. By default, uses the size of data to determine
the version. 118 is used if data.shape[1] <= 32767, and 119 is used
for storing larger DataFrames.
compression : str or dict, default 'infer'
For on-the-fly compression of the output dta. If string, specifies
compression mode. If dict, value at key 'method' specifies compression
mode. Compression mode must be one of {'infer', 'gzip', 'bz2', 'zip',
'xz', None}. If compression mode is 'infer' and `fname` is path-like,
then detect compression from the following extensions: '.gz', '.bz2',
'.zip', or '.xz' (otherwise no compression). If dict and compression
mode is one of {'zip', 'gzip', 'bz2'}, or inferred as one of the above,
other entries passed as additional compression options.
.. versionadded:: 1.1.0
Returns
-------
StataWriterUTF8
The instance has a write_file method, which will write the file to the
given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
Using Unicode data and column names
>>> from pandas.io.stata import StataWriterUTF8
>>> data = pd.DataFrame([[1.0, 1, 'ᴬ']], columns=['a', 'β', 'ĉ'])
>>> writer = StataWriterUTF8('./data_file.dta', data)
>>> writer.write_file()
Directly write a zip file
>>> compression = {"method": "zip", "archive_name": "data_file.dta"}
>>> writer = StataWriterUTF8('./data_file.zip', data, compression=compression)
>>> writer.write_file()
Or with long strings stored in strl format
>>> data = pd.DataFrame([['ᴀ relatively long ŝtring'], [''], ['']],
... columns=['strls'])
>>> writer = StataWriterUTF8('./data_file_with_long_strings.dta', data,
... convert_strl=['strls'])
>>> writer.write_file()
"""
_encoding = "utf-8"
def __init__(
self,
fname: FilePathOrBuffer,
data: DataFrame,
convert_dates: Optional[Dict[Label, str]] = None,
write_index: bool = True,
byteorder: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
data_label: Optional[str] = None,
variable_labels: Optional[Dict[Label, str]] = None,
convert_strl: Optional[Sequence[Label]] = None,
version: Optional[int] = None,
compression: Union[str, Mapping[str, str], None] = "infer",
):
if version is None:
version = 118 if data.shape[1] <= 32767 else 119
elif version not in (118, 119):
raise ValueError("version must be either 118 or 119.")
elif version == 118 and data.shape[1] > 32767:
raise ValueError(
"You must use version 119 for data sets containing more than"
"32,767 variables"
)
super().__init__(
fname,
data,
convert_dates=convert_dates,
write_index=write_index,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
variable_labels=variable_labels,
convert_strl=convert_strl,
compression=compression,
)
# Override version set in StataWriter117 init
self._dta_version = version
def _validate_variable_name(self, name: str) -> str:
"""
Validate variable names for Stata export.
Parameters
----------
name : str
Variable name
Returns
-------
str
The validated name with invalid characters replaced with
underscores.
Notes
-----
        Stata 118+ supports most Unicode characters. The only limitation is in
        the ASCII range, where the supported characters are a-z, A-Z, 0-9 and _.
"""
# High code points appear to be acceptable
for c in name:
if (
ord(c) < 128
and (c < "A" or c > "Z")
and (c < "a" or c > "z")
and (c < "0" or c > "9")
and c != "_"
) or 128 <= ord(c) < 256:
name = name.replace(c, "_")
return name
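def _example_ascii_only_rule(name):  # illustration only, not part of pandas
    # Hedged sketch of the renaming rule _validate_variable_name applies above:
    # ASCII characters other than a-z, A-Z, 0-9 and "_", plus anything in the
    # 128-255 range, become underscores; higher Unicode code points survive.
    out = []
    for c in name:
        cp = ord(c)
        allowed = ("a" <= c <= "z") or ("A" <= c <= "Z") or ("0" <= c <= "9") or c == "_"
        out.append("_" if (cp < 128 and not allowed) or 128 <= cp < 256 else c)
    return "".join(out)
# Example: _example_ascii_only_rule("price-β") == "price_β"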
|
bsd-3-clause
|
Titan-C/scikit-learn
|
sklearn/cluster/bicluster.py
|
11
|
20245
|
"""Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.linalg import norm
from scipy.sparse import dia_matrix, issparse
from scipy.sparse.linalg import eigsh, svds
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.extmath import (make_nonnegative, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
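def _example_bistochastic_normalize():  # illustration only, not part of scikit-learn
    """Hedged usage sketch: after ``_bistochastic_normalize`` every row shares
    one (nearly) constant sum and every column shares another, which is the
    property described in the docstring above."""
    rng = np.random.RandomState(0)
    X = rng.rand(6, 4) + 0.1
    X_scaled = _bistochastic_normalize(X)
    return np.ptp(X_scaled.sum(axis=1)), np.ptp(X_scaled.sum(axis=0))  # both close to 0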
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X, y=None):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
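def _example_spectral_coclustering():  # illustration only, not part of scikit-learn
    """Hedged usage sketch: recover planted biclusters with SpectralCoclustering
    and score them against the ground truth with consensus_score."""
    from sklearn.datasets import make_biclusters
    from sklearn.metrics import consensus_score
    data, rows, columns = make_biclusters(shape=(30, 30), n_clusters=3,
                                          noise=0.5, random_state=0)
    model = SpectralCoclustering(n_clusters=3, random_state=0)
    model.fit(data)
    return consensus_score(model.biclusters_, (rows, columns))  # close to 1.0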
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`scipy.sparse.linalg.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
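def _example_spectral_biclustering():  # illustration only, not part of scikit-learn
    """Hedged usage sketch: fit SpectralBiclustering on checkerboard data and
    compare the recovered row/column partitions with the planted ones."""
    from sklearn.datasets import make_checkerboard
    from sklearn.metrics import consensus_score
    data, rows, columns = make_checkerboard(shape=(60, 60), n_clusters=(3, 2),
                                            noise=0.5, random_state=0)
    model = SpectralBiclustering(n_clusters=(3, 2), method='log', random_state=0)
    model.fit(data)
    return consensus_score(model.biclusters_, (rows, columns))  # close to 1.0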
|
bsd-3-clause
|
AustereCuriosity/astropy
|
astropy/convolution/utils.py
|
4
|
10609
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ..modeling.core import FittableModel, custom_model
from ..extern.six.moves import range
__all__ = ['discretize_model']
class DiscretizationError(Exception):
"""
Called when discretization of models goes wrong.
"""
class KernelSizeError(Exception):
"""
Called when size of kernels is even.
"""
def add_kernel_arrays_1D(array_1, array_2):
"""
Add two 1D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = array_1.size // 2
slice_ = slice(center - array_2.size // 2,
center + array_2.size // 2 + 1)
new_array[slice_] += array_2
return new_array
elif array_2.size > array_1.size:
new_array = array_2.copy()
center = array_2.size // 2
slice_ = slice(center - array_1.size // 2,
center + array_1.size // 2 + 1)
new_array[slice_] += array_1
return new_array
return array_2 + array_1
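def _example_add_kernel_arrays_1D():  # illustration only, not part of astropy
    """Hedged sketch: because the centers are aligned, adding a length-3 kernel
    to a length-7 kernel only touches the middle three cells."""
    big = np.zeros(7)
    small = np.array([1., 2., 1.])
    return add_kernel_arrays_1D(big, small)  # -> [0, 0, 1, 2, 1, 0, 0]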
def add_kernel_arrays_2D(array_1, array_2):
"""
Add two 2D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = [axes_size // 2 for axes_size in array_1.shape]
slice_x = slice(center[1] - array_2.shape[1] // 2,
center[1] + array_2.shape[1] // 2 + 1)
slice_y = slice(center[0] - array_2.shape[0] // 2,
center[0] + array_2.shape[0] // 2 + 1)
new_array[slice_y, slice_x] += array_2
return new_array
elif array_2.size > array_1.size:
new_array = array_2.copy()
center = [axes_size // 2 for axes_size in array_2.shape]
slice_x = slice(center[1] - array_1.shape[1] // 2,
center[1] + array_1.shape[1] // 2 + 1)
slice_y = slice(center[0] - array_1.shape[0] // 2,
center[0] + array_1.shape[0] // 2 + 1)
new_array[slice_y, slice_x] += array_1
return new_array
return array_2 + array_1
def discretize_model(model, x_range, y_range=None, mode='center', factor=10):
"""
Function to evaluate analytical model functions on a grid.
So far the function can only deal with pixel coordinates.
Parameters
----------
model : `~astropy.modeling.FittableModel` or callable.
        Analytic model function to be discretized. Callables that are not
        instances of `~astropy.modeling.FittableModel` are passed to
`~astropy.modeling.custom_model` and then evaluated.
x_range : tuple
x range in which the model is evaluated. The difference between the
        upper and lower limit must be a whole number, so that the output array
size is well defined.
y_range : tuple, optional
y range in which the model is evaluated. The difference between the
        upper and lower limit must be a whole number, so that the output array
size is well defined. Necessary only for 2D models.
mode : str, optional
One of the following modes:
* ``'center'`` (default)
Discretize model by taking the value
at the center of the bin.
* ``'linear_interp'``
Discretize model by linearly interpolating
between the values at the corners of the bin.
For 2D models interpolation is bilinear.
* ``'oversample'``
Discretize model by taking the average
on an oversampled grid.
* ``'integrate'``
Discretize model by integrating the model
over the bin using `scipy.integrate.quad`.
Very slow.
factor : float or int
Factor of oversampling. Default = 10.
Returns
-------
array : `numpy.array`
Model value array
Notes
-----
    The ``oversample`` mode makes it possible to conserve the integral on a
    subpixel scale. Here is an example with a normalized Gaussian1D:
.. plot::
:include-source:
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling.models import Gaussian1D
from astropy.convolution.utils import discretize_model
gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5)
y_center = discretize_model(gauss_1D, (-2, 3), mode='center')
y_corner = discretize_model(gauss_1D, (-2, 3), mode='linear_interp')
y_oversample = discretize_model(gauss_1D, (-2, 3), mode='oversample')
plt.plot(y_center, label='center sum = {0:3f}'.format(y_center.sum()))
plt.plot(y_corner, label='linear_interp sum = {0:3f}'.format(y_corner.sum()))
plt.plot(y_oversample, label='oversample sum = {0:3f}'.format(y_oversample.sum()))
plt.xlabel('pixels')
plt.ylabel('value')
plt.legend()
plt.show()
"""
if not callable(model):
raise TypeError('Model must be callable.')
if not isinstance(model, FittableModel):
model = custom_model(model)()
ndim = model.n_inputs
if ndim > 2:
raise ValueError('discretize_model only supports 1-d and 2-d models.')
if not float(np.diff(x_range)).is_integer():
raise ValueError("The difference between the upper an lower limit of"
" 'x_range' must be a whole number.")
if y_range:
if not float(np.diff(y_range)).is_integer():
raise ValueError("The difference between the upper an lower limit of"
" 'y_range' must be a whole number.")
if ndim == 2 and y_range is None:
raise ValueError("y range not specified, but model is 2-d")
if ndim == 1 and y_range is not None:
raise ValueError("y range specified, but model is only 1-d.")
if mode == "center":
if ndim == 1:
return discretize_center_1D(model, x_range)
elif ndim == 2:
return discretize_center_2D(model, x_range, y_range)
elif mode == "linear_interp":
if ndim == 1:
return discretize_linear_1D(model, x_range)
if ndim == 2:
return discretize_bilinear_2D(model, x_range, y_range)
elif mode == "oversample":
if ndim == 1:
return discretize_oversample_1D(model, x_range, factor)
if ndim == 2:
return discretize_oversample_2D(model, x_range, y_range, factor)
elif mode == "integrate":
if ndim == 1:
return discretize_integrate_1D(model, x_range)
if ndim == 2:
return discretize_integrate_2D(model, x_range, y_range)
else:
raise DiscretizationError('Invalid mode.')
def discretize_center_1D(model, x_range):
"""
Discretize model by taking the value at the center of the bin.
"""
x = np.arange(*x_range)
return model(x)
def discretize_center_2D(model, x_range, y_range):
"""
Discretize model by taking the value at the center of the pixel.
"""
x = np.arange(*x_range)
y = np.arange(*y_range)
x, y = np.meshgrid(x, y)
return model(x, y)
def discretize_linear_1D(model, x_range):
"""
Discretize model by performing a linear interpolation.
"""
# Evaluate model 0.5 pixel outside the boundaries
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
values_intermediate_grid = model(x)
return 0.5 * (values_intermediate_grid[1:] + values_intermediate_grid[:-1])
def discretize_bilinear_2D(model, x_range, y_range):
"""
Discretize model by performing a bilinear interpolation.
"""
# Evaluate model 0.5 pixel outside the boundaries
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
x, y = np.meshgrid(x, y)
values_intermediate_grid = model(x, y)
# Mean in y direction
values = 0.5 * (values_intermediate_grid[1:, :]
+ values_intermediate_grid[:-1, :])
# Mean in x direction
values = 0.5 * (values[:, 1:]
+ values[:, :-1])
return values
def discretize_oversample_1D(model, x_range, factor=10):
"""
Discretize model by taking the average on an oversampled grid.
"""
# Evaluate model on oversampled grid
x = np.arange(x_range[0] - 0.5 * (1 - 1 / factor),
x_range[1] + 0.5 * (1 + 1 / factor), 1. / factor)
values = model(x)
# Reshape and compute mean
values = np.reshape(values, (x.size // factor, factor))
return values.mean(axis=1)[:-1]
def discretize_oversample_2D(model, x_range, y_range, factor=10):
"""
Discretize model by taking the average on an oversampled grid.
"""
# Evaluate model on oversampled grid
x = np.arange(x_range[0] - 0.5 * (1 - 1 / factor),
x_range[1] + 0.5 * (1 + 1 / factor), 1. / factor)
y = np.arange(y_range[0] - 0.5 * (1 - 1 / factor),
y_range[1] + 0.5 * (1 + 1 / factor), 1. / factor)
x_grid, y_grid = np.meshgrid(x, y)
values = model(x_grid, y_grid)
# Reshape and compute mean
shape = (y.size // factor, factor, x.size // factor, factor)
values = np.reshape(values, shape)
return values.mean(axis=3).mean(axis=1)[:-1, :-1]
def discretize_integrate_1D(model, x_range):
"""
Discretize model by integrating numerically the model over the bin.
"""
from scipy.integrate import quad
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
values = np.array([])
# Integrate over all bins
for i in range(x.size - 1):
values = np.append(values, quad(model, x[i], x[i + 1])[0])
return values
def discretize_integrate_2D(model, x_range, y_range):
"""
Discretize model by integrating the model over the pixel.
"""
from scipy.integrate import dblquad
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
values = np.empty((y.size - 1, x.size - 1))
# Integrate over all pixels
for i in range(x.size - 1):
for j in range(y.size - 1):
values[j, i] = dblquad(lambda y, x: model(x, y), x[i], x[i + 1],
lambda x: y[j], lambda x: y[j + 1])[0]
return values
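def _example_discretize_model():  # illustration only, not part of astropy
    """Hedged usage sketch: on a coarse grid the 'oversample' mode keeps the sum
    of a normalized Gaussian1D closer to 1 than the 'center' mode, illustrating
    the integral-conservation property noted in the discretize_model docstring."""
    from astropy.modeling.models import Gaussian1D
    gauss = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5)
    y_center = discretize_model(gauss, (-2, 3), mode='center')
    y_oversample = discretize_model(gauss, (-2, 3), mode='oversample', factor=10)
    return y_center.sum(), y_oversample.sum()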
|
bsd-3-clause
|
tntnatbry/tensorflow
|
tensorflow/examples/learn/mnist.py
|
45
|
3999
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows the description in this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_model(feature, target, mode):
"""2-layer convolution model."""
# Convert the target to a one-hot tensor of shape (batch_size, 10) and
  # with an on-value of 1 for each one-hot vector of length 10.
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
# Reshape feature to 4d tensor with 2nd and 3rd dimensions being
  # image width and height, with the final dimension being the number of color channels.
feature = tf.reshape(feature, [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = layers.convolution2d(
feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = layers.convolution2d(
h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(
h_pool2_flat, 1024, activation_fn=tf.nn.relu),
keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
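def _example_pool_shape_arithmetic():  # illustration only, not part of the tutorial
  # Hedged sketch of the reshape above: two SAME-padded 2x2 max-pools with
  # stride 2 halve the 28x28 image twice (28 -> 14 -> 7), so each image is
  # flattened to 7 * 7 * 64 = 3136 features before the dense layer.
  size = 28
  for _ in range(2):
    size = (size + 1) // 2  # ceil(size / 2) under SAME padding with stride 2
  return size * size * 64  # -> 3136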
def main(unused_args):
### Download and load MNIST dataset.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images,
mnist.train.labels.astype(np.int32),
batch_size=100,
steps=1000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
classifier.fit(mnist.train.images,
mnist.train.labels,
batch_size=100,
steps=20000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
jjx02230808/project0223
|
sklearn/linear_model/__init__.py
|
270
|
3096
|
"""
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
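def _example_lasso_fit():  # illustration only, not part of scikit-learn
    """Hedged sketch of the coordinate-descent estimators listed above: fit a
    Lasso model on a tiny problem and read back its coefficients."""
    import numpy as np
    X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    y = np.array([0.0, 1.0, 2.0])
    model = Lasso(alpha=0.1).fit(X, y)
    return model.coef_, model.intercept_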
|
bsd-3-clause
|
coreyabshire/stacko
|
submission/topics.py
|
1
|
2173
|
import competition_utilities as cu
import numpy
import pandas as pd
import onlineldavb
import re
def istag(x):
return not pd.isnull(x) #(x is not None) and (x != 'nan')
class QuestionSet:
def __init__(self, dataframe):
self.dataframe = dataframe
def parse_doc(self, row):
title = row['Title']
body = row['BodyMarkdown']
tags = ' '.join(filter(istag, [row['Tag%d' % t] for t in range(1,6)]))
postid = row['PostId']
doc = ' '.join([title, body, tags])
name = postid
return doc, name
def parse_doc_no_code(self, row):
title = row['Title']
code_pattern = re.compile(r'^(?: {4}|\t).*$', re.M)
body = code_pattern.sub('', row['BodyMarkdown'])
tags = ' '.join(filter(istag, [row['Tag%d' % t] for t in range(1,6)]))
postid = row['PostId']
doc = ' '.join([title, body, tags])
name = postid
return doc, name
def get_batch(self, start, end):
docset = []
articlenames = []
for i in range(start, end):
row = self.dataframe.ix[i]
doc, name = self.parse_doc_no_code(row)
docset.append(doc)
articlenames.append(name)
return docset, articlenames
def close(self):
self.file.close()
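def _example_strip_code_blocks():  # illustration only, not part of this project
    # Hedged sketch of the regex used in parse_doc_no_code above: lines that
    # look like Markdown code blocks (indented by four spaces or a tab) are
    # blanked out before the question text is handed to the topic model.
    code_pattern = re.compile(r'^(?: {4}|\t).*$', re.M)
    body = "How do I loop?\n    for i in range(3):\n        pass\nThanks!"
    return code_pattern.sub('', body)  # -> "How do I loop?\n\n\nThanks!"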
def allocate_topics(lda, data, K, batchsize, D):
n_iterations = len(data) / batchsize
questions = QuestionSet(data)
topics = numpy.zeros((len(data), K))
# derive topics from data in batches
for iteration in range(0, n_iterations):
start = iteration * batchsize
end = start + batchsize
(docset, _) = questions.get_batch(start, end)
(gamma, bound) = lda.update_lambda(docset)
topics[start:end,:] = gamma
(wordids, wordcts) = onlineldavb.parse_doc_list(docset, lda._vocab)
perwordbound = bound * len(docset) / (D * sum(map(sum, wordcts)))
print '%d: rho_t = %f, held-out perplexity estimate = %f' % \
(iteration, lda._rhot, numpy.exp(-perwordbound))
# copy to dataframe
for k in range(K):
data['Topic%d'%k] = topics[:,k]
return topics
|
bsd-2-clause
|
pcuzner/fio-tools
|
reporting/fio_collector.py
|
1
|
5373
|
#!/usr/bin/env python
__author__ = 'paul'
import fio_parse
from optparse import OptionParser
import os
from fio_plot import FIOPlot
# Assumption - the data files we're parsing represent repeated fio runs, where each run
# includes another vm's results, i.e. run1 = 1 job, run2 = 2 jobs, etc.
# The file names must therefore preserve this order, so naming is important to ensure the
# data is read in the correct sequence.
#
# must run on a platform with matplotlib to allow the chart to be generated - which requires
# an x server to also be installed
#
def get_files(path_name):
file_list = []
if os.path.isdir(path_name):
for f in os.listdir(path_name):
fq_path = os.path.join(path_name, f)
if os.path.isfile(fq_path):
# TODO - should add more checking here
if fq_path[-4:] == '.out':
file_list.append(fq_path)
else:
if os.path.exists(path_name):
file_list.append(path_name)
return sorted(file_list)
def get_max_listsize(data_in):
return max((len(obs_list)) for key, obs_list in data_in.iteritems())
def format_perf_data(perf_data):
max_size = get_max_listsize(perf_data)
for key in perf_data:
obs_list = perf_data[key]
if len(obs_list) < max_size:
padding = [None]*(max_size - len(obs_list))
perf_data[key] = padding + obs_list
return perf_data
def aggregate_data(data_in, aggr_type='iops'):
aggr_data = {}
summary_data = []
max_element = get_max_listsize(data_in)
for ptr in range(0,max_element):
data_points = []
divisor = 0
for key in data_in:
if data_in[key][ptr] is not None:
data_points.append(data_in[key][ptr])
divisor +=1
if aggr_type == 'iops':
summary_data.append(sum(data_points))
elif aggr_type == 'latency':
print "%d " % (sum(data_points)/float(divisor))
summary_data.append(sum(data_points)/float(divisor))
aggr_data['Aggregated Data'] = summary_data
return aggr_data
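def _example_aggregate_data():  # illustration only, not part of fio-tools
    # Hedged sketch: with two equally long per-job series, aggr_type='iops'
    # sums the observations position by position into a single series, while
    # aggr_type='latency' would average them over the jobs that reported a value.
    sample = {'job1': [100, 110], 'job2': [90, 95]}
    return aggregate_data(sample, 'iops')  # {'Aggregated Data': [190, 205]}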
def main(options):
perf_data = {}
chart_ceiling = None if options.ceiling == "none" else options.ceiling
json_file_list = get_files(options.fio_file_path)
if json_file_list:
for f in json_file_list:
perf_sample = fio_parse.get_json_data(json_file=f, json_path=options.json_key)
if perf_sample['status'] == 'OK':
del perf_sample['status']
for key in perf_sample:
if key in perf_data:
perf_data[key].append(perf_sample[key])
else:
perf_data[key] = [perf_sample[key]]
# need to add padding to the data to make each entry have the same
# number of observations
fmtd_data = format_perf_data(perf_data)
if options.data_aggregate:
fmtd_data = aggregate_data(fmtd_data, options.chart_type)
chart = FIOPlot(chart_type=options.chart_type,
data=fmtd_data,
title=options.title,
ceiling=chart_ceiling,
xlabel='Concurrent jobs',
ylabel=options.ylabel)
chart.generate_plot(options.output_file)
print fmtd_data
else:
print "no files found matching the path provided %s" % options.fio_file_path
if __name__ == "__main__":
usage_info = "Usage: %prog [options]"
parser = OptionParser(usage=usage_info, version="%prog 0.1")
parser.add_option("-y", "--yaxis-label", dest="ylabel", action="store",
default="Response Time (ms)",
help="Chart label for the yaxis")
parser.add_option("-T", "--chart-type", dest="chart_type", action="store",
choices=['iops','latency'],default='latency',
help="chart type - either iops or [latency]")
parser.add_option("-a", "--aggr", dest="data_aggregate", action="store_true",
default=False,
help="aggregate the iops or latency data, instead of per job data")
parser.add_option("-D", "--debug", dest="debug", action="store_true",
default=False,
help="turn on debug output")
parser.add_option("-c", "--ceiling", dest="ceiling", action="store",
default=50000,
help="(int) ceiling to show Max acceptable values, or none")
parser.add_option("-p", "--pathname", dest="fio_file_path", action="store",
help="file name/path containing fio json output")
parser.add_option("-k", "--keyname", dest="json_key", action="store",
help="json path for the attribute to extract from the fio json file(s)")
parser.add_option("-t", "--title", dest="title", action="store",
help="Chart title", default="FIO Chart")
parser.add_option("-o", "--output", dest="output_file",action="store",
help="output filename", default="myfile.png")
(options, args) = parser.parse_args()
if options.fio_file_path and options.json_key:
main(options)
else:
print "You must provide a path or filename for the fio json file(s)"
|
gpl-3.0
|
harshaneelhg/scikit-learn
|
sklearn/linear_model/tests/test_ransac.py
|
216
|
13290
|
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
assert_raises_regexp(ValueError,
"No inliers.*residual_threshold.*0\.0",
ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
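def _example_dynamic_max_trials_formula():  # illustration only, not part of scikit-learn
    # Hedged sketch of the standard formula behind the hand-checked values in
    # test_ransac_dynamic_max_trials: N = log(1 - p) / log(1 - w**m), with
    # inlier ratio w, sample size m and desired success probability p.
    w, m, p = 0.5, 2, 0.99
    return int(np.ceil(np.log(1 - p) / np.log(1 - w ** m)))  # -> 17, as asserted above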
|
bsd-3-clause
|
kazemakase/scikit-learn
|
examples/linear_model/plot_lasso_coordinate_descent_path.py
|
254
|
2639
|
"""
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
kristenkotkas/moviediary
|
recommender-python/Server.py
|
1
|
4539
|
#!/usr/bin/env python
# https://alyssaq.github.io/2015/20150426-simple-movie-recommender-using-svd/
import json
import numpy as np
import pandas as pd
import urllib.parse
from http.server import BaseHTTPRequestHandler, HTTPServer
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
np.seterr(divide='ignore', invalid='ignore')
lookup = {
"71": 862,
"66": 197,
"67": 278,
"68": 3049,
"69": 8587,
"62": 78,
"63": 771,
"3": 114,
"64": 627,
"65": 238,
"70": 872,
"4": 567,
"5": 770,
"6": 62,
"7": 88,
"8": 601,
"9": 85,
"10": 348,
"11": 703,
"12": 694,
"13": 914,
"14": 621,
"15": 578,
"2": 816,
"16": 18,
"17": 597,
"18": 1725,
"19": 11252,
"20": 8741,
"21": 11167,
"22": 603,
"23": 509,
"1": 2105,
"24": 550,
"25": 10784,
"26": 392,
"27": 77,
"28": 808,
"29": 676,
"30": 585,
"31": 120,
"32": 453,
"33": 855,
"34": 425,
"35": 672,
"36": 423,
"37": 12,
"38": 22,
"39": 24,
"40": 11846,
"41": 38,
"42": 11036,
"43": 6947,
"44": 9806,
"45": 477433,
"46": 591,
"47": 920,
"48": 350,
"49": 1858,
"50": 7326,
"51": 155,
"52": 8966,
"53": 13223,
"54": 19995,
"55": 50014,
"56": 84892,
"57": 157336,
"58": 207703,
"59": 140607,
"60": 286217,
"61": 259693,
}
reverse_lookup = {v: k for k, v in lookup.items()}  # inverse of lookup; avoids shadowing the built-in reversed()
class HttpServer(BaseHTTPRequestHandler):
def do_GET(self):
path = self.path
params = urllib.parse.parse_qs(path[2:])
if not params:
self.send_response(404)
return
movie_id = params['id'][0]
        response = json.dumps(getData(reverse_lookup[int(movie_id)]))
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(bytes(response, "utf8"))
return
def do_POST(self):
content_len = int(self.headers.get('content-length', 0))
if content_len == 0:
self.send_response(404)
return
post_body = self.rfile.read(content_len).decode('utf-8')
data = json.loads(post_body)
description = data["description"]
response = json.dumps(get_genre_from_desc(description))
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(bytes(response, "utf8"))
return
def run():
print('starting server...')
server_address = ('127.0.0.1', 9998)
httpd = HTTPServer(server_address, HttpServer)
print('running server...')
httpd.serve_forever()
movie_data = pd.io.parsers.read_csv('resources/final-new-movies.csv',
names=['movie_id', 'title', 'genre'],
engine='python', delimiter=';')
model = np.load('resources/model.npz')
evals = model['a']
evecs = model['b']
desc_to_genre_model = Doc2Vec.load("resources/DescToGenre")
def getData(movie_id):
movie_id = int(movie_id)
top_indexes = top_cosine_similarity(evecs[:, :25], movie_id, 71)
return get_similar_movies(movie_data, movie_id, top_indexes)
def top_cosine_similarity(data, movie_id, top_n):
index = movie_id - 1 # Movie id starts from 1
movie_row = data[index, :]
magnitude = np.sqrt(np.einsum('ij, ij -> i', data, data))
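    # `magnitude` holds the Euclidean norm of every row; the next line is then the
    # cosine similarity of the query movie against every movie:
    #     sim(i) = <movie_row, data[i]> / (||movie_row|| * ||data[i]||)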
similarity = np.dot(movie_row, data.T) / (magnitude[index] * magnitude)
sort_indexes = np.argsort(-similarity)
return (sort_indexes[:top_n], similarity)
def get_similar_movies(movie_data, movie_id, top_indexes):
print('Recommendations for {0}: \n'.format(
movie_data[movie_data.movie_id == movie_id].title.values[0]))
result = []
for id in top_indexes[0] + 1:
result.append({
"tmdb_id": lookup[str(movie_data[movie_data.movie_id == id].movie_id.values[0])],
"similarity": top_indexes[1][id - 1],
"title": movie_data[movie_data.movie_id == id].title.values[0]
})
return {"result": result}
def get_genre_from_desc(desc):
document = desc.split()
inferred_docvec = desc_to_genre_model.infer_vector(document)
prediction = desc_to_genre_model.docvecs.most_similar([inferred_docvec], topn=3)
result = {
"best" : prediction[0][0],
"second" : prediction[1][0],
"third" : prediction[2][0]
}
return result
run()
|
mit
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/sklearn/tests/test_grid_search.py
|
68
|
28856
|
"""
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
|
mit
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/pandas/tests/io/msgpack/test_read_size.py
|
22
|
1870
|
"""Test Unpacker's read_array_header and read_map_header methods"""
from pandas.io.msgpack import packb, Unpacker, OutOfData
UnexpectedTypeException = ValueError
def test_read_array_header():
unpacker = Unpacker()
unpacker.feed(packb(['a', 'b', 'c']))
assert unpacker.read_array_header() == 3
assert unpacker.unpack() == b'a'
assert unpacker.unpack() == b'b'
assert unpacker.unpack() == b'c'
try:
unpacker.unpack()
assert 0, 'should raise exception'
except OutOfData:
assert 1, 'okay'
def test_read_map_header():
unpacker = Unpacker()
unpacker.feed(packb({'a': 'A'}))
assert unpacker.read_map_header() == 1
    assert unpacker.unpack() == b'a'
    assert unpacker.unpack() == b'A'
try:
unpacker.unpack()
assert 0, 'should raise exception'
except OutOfData:
assert 1, 'okay'
def test_incorrect_type_array():
unpacker = Unpacker()
unpacker.feed(packb(1))
try:
unpacker.read_array_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_incorrect_type_map():
unpacker = Unpacker()
unpacker.feed(packb(1))
try:
unpacker.read_map_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_correct_type_nested_array():
unpacker = Unpacker()
unpacker.feed(packb({'a': ['b', 'c', 'd']}))
try:
unpacker.read_array_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
def test_incorrect_type_nested_map():
unpacker = Unpacker()
unpacker.feed(packb([{'a': 'b'}]))
try:
unpacker.read_map_header()
assert 0, 'should raise exception'
except UnexpectedTypeException:
assert 1, 'okay'
|
mit
|
marcocaccin/scikit-learn
|
examples/cluster/plot_segmentation_toy.py
|
258
|
3336
|
"""
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: the more weakly the weights
# depend on the gradient, the closer the segmentation is to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
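# Sketch of the weighting applied above (assuming graph.data holds the absolute
# gradient value on each edge, as returned by img_to_graph):
#     w_ij = exp(-|grad|_ij / std(|grad|))
# so edges inside homogeneous regions get weights close to 1, while strong
# intensity edges are exponentially down-weighted.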
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
|
bsd-3-clause
|
sdvillal/manysources
|
manysources/analyses/analyses_examples.py
|
1
|
10150
|
# coding=utf-8
"""Analyse experimental results."""
from collections import defaultdict
from itertools import product, izip
import h5py
from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from manysources.datasets import ManysourcesDataset, MANYSOURCES_DATA_ROOT, MANYSOURCES_MOLECULES
from manysources.experiments import ManysourcesResult
import os.path as op
# Everything here is about fighting superdumb rules deeply rooted in the chem community
# External validation only makes sense when the evaluation really comes from a different assay
################################
# Per-fold sizes, class proportions and AUCs
################################
def merge_cache_sizes_aucs(dset='bcrp',
feats='ecfps1',
model='logreg1',
expid=0,
lso=True):
"""
Merges and caches the sizes and performance information for the folds in the specified experiment,
returning a pandas dataframe with the informations in records format:
(dset, feats, model, expid, lso, fold_num, test_size, pos_proportion, auc)
"""
# Coordinates
cache_file = op.join(MANYSOURCES_DATA_ROOT, 'results', 'merged_fold_sizes_aucs.h5')
group_id = '/dset=%s/feats=%s/model=%s/expid=%d/lso=%r' % (dset, feats, model, expid, lso)
    # Cache function for readability
def cache():
with h5py.File(cache_file, 'a') as h5w: # Using h5 as we do here is highly inefficient
# (just attributes, no locality)
# It allows for:
# - incremental addition of folds
# - easy merging with other HDF5 files we are creating
# (just add these attributes)
group = h5w.require_group(group_id)
def add_fold(dset, feats, model, expid, fold_num, fold):
try:
auc = fold.auc()
test_size = fold.test_size()
pos_proportion = fold.pos_proportion()
fold_group = group.require_group('fold=%d' % fold_num)
fold_group.attrs['test_size'] = test_size
fold_group.attrs['pos_proportion'] = pos_proportion
fold_group.attrs['auc'] = auc
except:
print 'Failed', dset, feats, model, expid, lso, fold_num
with ManysourcesResult(expid=expid, dset=dset, feats=feats, model=model) as res:
print dset, expid, model, feats, lso
cv = res.lsocv() if lso else res.crscv()
if cv is None:
return None
for fold_num, fold in enumerate(cv.folds()):
try:
add_fold(dset, feats, model, expid, fold_num, fold)
except:
print 'ERROR', dset, feats, model, expid
# Recache - LAME
recache = False
if not op.isfile(cache_file):
recache = True # LAME
else:
with h5py.File(cache_file, 'r') as h5:
if not group_id in h5:
recache = True
if recache:
cache()
# Read
with h5py.File(cache_file, 'r') as h5:
records = []
for fold_id, group in h5[group_id].iteritems():
records.append((dset, feats, model, expid, lso,
int(fold_id.split('=')[1]),
group.attrs['test_size'],
group.attrs['pos_proportion'],
group.attrs['auc']))
return pd.DataFrame(records, columns=['dset', 'feats', 'model', 'expid', 'lso',
'fold',
'test_size', 'pos_proportion', 'auc'])
def cache_all_fold_sizes_aucs(drop_na=True):
cache_file = op.join(MANYSOURCES_DATA_ROOT, 'results', 'merged_fold_sizes_aucs_bigdf.pickled')
if not op.isfile(cache_file):
dsets = MANYSOURCES_MOLECULES.keys()
feats_models = (('logreg1', 'ecfps1'),
('logreg3', 'ecfps1'),
('rfc1', 'rdkdescs1'))
expids = range(4096)
dfs = []
for dset, (model, feats), expid, lso in product(dsets, feats_models, expids, (True, False)):
dfs.append(merge_cache_sizes_aucs(dset=dset, model=model, feats=feats, expid=expid, lso=lso))
big_df = pd.concat(dfs, ignore_index=True).reset_index(drop=True)
big_df.set_index(['dset', 'feats', 'model', 'expid', 'lso', 'fold'])
big_df.to_pickle(cache_file,)
df = pd.read_pickle(cache_file)
if drop_na:
return df.dropna(axis=0)
return df
def average_perf_plot(df=None):
    if df is None:
df = cache_all_fold_sizes_aucs(drop_na=True)
# Quick review of average perf
plt.figure()
df.groupby(['dset', 'model', 'feats', 'lso'])['auc'].mean().plot(kind='bar')
plt.show()
def two_violins(df, dset='bcrp', model='logreg3', feats='ecfps1'):
# LSO vs CRS AUC, violin plot (missing Flo's aesthetics)
df = df[(df.dset == dset) & (df.model == model) & (df.feats == feats)]
plt.figure()
sns.violinplot(df.auc, df.lso)
plt.show()
#df = cache_all_fold_sizes_aucs(drop_na=True)
#two_violins(df, dset='mutagenicity')
#exit(33)
def unbalancy_scatterplot(df, dset='bcrp', model='logreg3', feats='ecfps1'):
# LSO vs CRS, proportion vs AUC, scatter plot (missing Flo's aesthetics)
# I guess we can do this with a groupby...
df = df[(df.dset == dset) & (df.model == model) & (df.feats == feats)]
plt.figure()
plt.scatter(x=df[df.lso]['pos_proportion'], y=df[df.lso]['auc'], color='r', label='LSO')
plt.scatter(x=df[~df.lso]['pos_proportion'], y=df[~df.lso]['auc'], color='b', label='CRS')
plt.axvline(x=df[~df.lso]['pos_proportion'].min(), color='k')
plt.axvline(x=df[~df.lso]['pos_proportion'].max() - 0.035, color='k')
plt.xlabel('Positive proportion in test set', fontsize=22)
plt.ylabel('AUC', fontsize=22)
plt.ylim((0, 1))
plt.legend(fontsize=22)
plt.tick_params(labelsize=16)
plt.show()
def twoviolins_nooutliers(df, dset='bcrp', model='logreg3', feats='ecfps1', pos_proportions_min=0.4,
pos_proportions_max=0.8):
# Filter out outliers or degenerated cases: "too imbalanced"
df = df[(df.dset == dset) & (df.model == model) & (df.feats == feats)]
balanced = df[(df['pos_proportion'] > pos_proportions_min) & (df['pos_proportion'] < pos_proportions_max)]
plt.figure()
sns.violinplot(balanced.auc, balanced.lso)
plt.draw()
# N.B. use violinplot ax parameter and others to control ranges, fonts etc.
# Do a multiplot
# from rdkit.Chem import AllChem
# dset = ManysourcesDataset('bcrp')
# fold_de_mierda = ['Ahmed-Belkacem_2005', 'Ali-Versiani_2011', 'Feng_2008', 'Giannini_2008']
# molids = dset.mols().sources2molids(fold_de_mierda)
# mols = [dset.mols().molid2mol(molid) for molid in molids]
# ys = [dset.mols().molid2target(molid) for molid in molids]
# print molids
# print ys
# for molid, mol in zip(molids, mols):
# print molid, AllChem.MolToSmiles(mol)
# exit(26)
def logistic_from_weights(weights, intercept):
# Rebuild the trained model given the parameters
logreg = LogisticRegression()
logreg.coef_ = weights
logreg.intercept_ = intercept
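    # Note: coef_ and intercept_ are enough for decision_function(); depending on
    # the scikit-learn version, predict() may also expect a classes_ attribute
    # (not set here).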
return logreg
def density(vector, eps=1E-6):
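    # Fraction of coefficients whose magnitude exceeds eps, i.e. the share of
    # effectively non-zero weights in the vector.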
return np.sum(np.abs(vector) > eps) / float(len(vector))
#### NOTE: here we are not using the parameter "source"...
def source_only_features(dset='bcrp',
model='logreg3',
feats='ecfps1',
expids=range(20),
source='phenylquinazolines_Juvale_2012'):
""""""
dset = ManysourcesDataset(dset)
sparsities = defaultdict(list)
for expid in expids:
res = ManysourcesResult(expid=expid, dset=dset.name, feats=feats, model=model)
# models
lso_models = [logistic_from_weights(weights, intercept) for weights, intercept, _ in res.lsocv().all_models()]
crs_models = [logistic_from_weights(weights, intercept) for weights, intercept, _ in res.crscv().all_models()]
# is sparsity the same?
for lsom, crsm in zip(lso_models, crs_models):
sparsities['sparsity_lso'].append(density(lsom.coef_[0, :]))
sparsities['sparsity_crs'].append(density(crsm.coef_[0, :]))
return pd.DataFrame(sparsities)
#
# Not very competitive, but logreg3 gets high weight sparsity at reasonable performance
#
# molids, lso_losses = collect_losses(dset='hERG')
# _, crs_losses = collect_losses(dset='hERG', lso=False)
# data = {'source': ManysourcesDataset(dset='hERG').mols().sources(molids),
# 'molid': molids,
# 'lso_sqloss': lso_losses,
# 'crs_sqloss': crs_losses}
#
# df = pd.DataFrame(data)
# df['loss_diff'] = df['lso_sqloss'] - df['crs_sqloss']
#
# print df['loss_diff'].mean()
# per_source = df.groupby('source')
# mean_losses = per_source.mean()
# print mean_losses.sort('loss_diff')
# for source, group in per_source:
# print '%s: %.4f (%d)' % (source, group['loss_diff'].mean(), len(group))
#
# exit(33)
#
# Tests for sources in lso
# assert set(chain(*coocs)) == dset.mols().present_sources(), 'All sources should be there'
# for s1, s2 in combinations(coocs, 2):
# assert len(s1 & s2) == 0, 'No source should be repeated for LSO'
#
if __name__ == '__main__':
dset = 'bcrp'
feats = 'ecfps1'
model = 'logreg3'
lso = True
# Pandas side of things
print scores_df().columns
#
    # TODO: rerun experiments splitting sources into sub-sources (will make the co-occurrence analysis more sound)
# FIXME: Problem with scores at hERG rfc1 rdkdescs1, need to drop nans?
# FIXME: Also with scores at mutagenicity rdkit descs, need to drop nans?
#
|
bsd-3-clause
|
anhaidgroup/py_entitymatching
|
py_entitymatching/dask/dask_nbmatcher.py
|
1
|
1467
|
"""
This module contains the functions for Naive Bayes classifier.
"""
# from py_entitymatching.matcher.mlmatcher import MLMatcher
import logging
from py_entitymatching.dask.daskmlmatcher import DaskMLMatcher
from py_entitymatching.matcher.matcherutils import get_ts
from sklearn.naive_bayes import GaussianNB
logger = logging.getLogger(__name__)
class DaskNBMatcher(DaskMLMatcher):
"""
WARNING THIS MATCHER IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.
Naive Bayes matcher.
Args:
*args,**kwargs: The arguments to scikit-learn's Naive Bayes
classifier.
name (string): The name of this matcher (defaults to None). If the
matcher name is None, the class automatically generates a string
and assigns it as the name.
"""
def __init__(self, *args, **kwargs):
logger.warning(
"WARNING THIS MATCHER IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.")
# If the name is given, then pop it
name = kwargs.pop('name', None)
if name is None:
            # If the name of the matcher is not given, then create one.
# Currently, we use a constant string + a random number.
self.name = 'NaiveBayes'+ '_' + get_ts()
else:
# Set the name of the matcher, with the given name.
self.name = name
super(DaskNBMatcher, self).__init__()
# Set the classifier to the scikit-learn classifier.
self.clf = GaussianNB(*args, **kwargs)
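# A hypothetical usage sketch (not part of this module; the table and attribute
# names are assumptions, following the usual py_entitymatching ML-matcher calls):
#     nb = DaskNBMatcher(name='NaiveBayes')
#     nb.fit(table=H, exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
#            target_attr='label')
#     predictions = nb.predict(table=L, exclude_attrs=['_id', 'ltable_id', 'rtable_id'],
#                              target_attr='predicted', append=True, inplace=False)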
|
bsd-3-clause
|
catalyst-cooperative/pudl
|
src/pudl/transform/ferc714.py
|
1
|
20306
|
"""Transformation of the FERC Form 714 data."""
import logging
import re
import numpy as np
import pandas as pd
import pudl
from pudl import constants as pc
logger = logging.getLogger(__name__)
##############################################################################
# Constants required for transforming FERC 714
##############################################################################
# More detailed fixes on a per respondent basis
OFFSET_CODE_FIXES = {
102: {"CPT": "CST"},
110: {"CPT": "EST"},
115: {"MS": "MST"},
118: {
"CS": "CST",
"CD": "CDT",
},
120: {
"CTR": "CST",
"CSR": "CST",
"CPT": "CST",
"DST": "CST",
np.nan: "CST",
},
133: {
"AKS": "AKST",
"AST": "AKST",
"AKD": "AKDT",
"ADT": "AKDT",
},
134: {np.nan: "EST"},
137: {np.nan: "CST"},
140: {
"1": "EST",
"2": "EDT",
np.nan: "EST",
},
141: {np.nan: "CST"},
143: {"MS": "MST"},
146: {"DST": "EST"},
148: {np.nan: "CST"},
151: {
"DST": "CDT",
np.nan: "CST",
},
153: {np.nan: "MST"},
154: {np.nan: "MST"},
156: {np.nan: "CST"},
157: {"DST": "EDT"},
161: {"CPT": "CST"},
163: {"CPT": "CST"},
164: {np.nan: "CST"},
165: {"CS": "CST"}, # Uniform across the year.
173: {
"CPT": "CST",
np.nan: "CST",
},
174: {
"CS": "CDT", # Only shows up in summer! Seems backwards.
"CD": "CST", # Only shows up in winter! Seems backwards.
"433": "CDT",
},
176: {
"E": "EST",
np.nan: "EST",
},
182: {"PPT": "PDT"}, # Imperial Irrigation District P looks like D
186: {"EAS": "EST"},
189: {"CPT": "CST"},
190: {"CPT": "CST"},
193: {
"CS": "CST",
"CD": "CDT",
},
194: {"PPT": "PST"}, # LADWP, constant across all years.
195: {"CPT": "CST"},
208: {np.nan: "CST"},
211: {
"206": "EST",
"DST": "EDT",
np.nan: "EST",
},
213: {"CDS": "CDT"},
216: {np.nan: "CDT"},
217: {
"MPP": "MST",
"MPT": "MST",
},
224: {"DST": "EST"},
225: {
"EDS": "EDT",
"DST": "EDT",
"EPT": "EST",
},
226: {"DST": "CDT"},
230: {"EPT": "EST"},
233: {"DST": "EDT"},
234: {
"1": "EST",
"2": "EDT",
"DST": "EDT",
},
# Constant across the year. Never another timezone seen.
239: {"PPT": "PST"},
243: {"DST": "PST"},
245: {"CDS": "CDT"},
248: {"DST": "EDT"},
253: {"CPT": "CST"},
254: {"DST": "CDT"},
257: {"CPT": "CST"},
259: {"DST": "CDT"},
264: {"CDS": "CDT"},
271: {"EDS": "EDT"},
275: {"CPT": "CST"},
277: {
"CPT": "CST",
np.nan: "CST",
},
281: {"CEN": "CST"},
288: {np.nan: "EST"},
293: {np.nan: "MST"},
294: {np.nan: "EST"},
296: {"CPT": "CST"},
297: {"CPT": "CST"},
298: {"CPT": "CST"},
299: {"CPT": "CST"},
307: {"PPT": "PST"}, # Pacificorp, constant across the whole year.
308: {
"DST": "EDT",
"EDS": "EDT",
"EPT": "EST",
},
328: {
"EPT": "EST",
},
}
OFFSET_CODE_FIXES_BY_YEAR = [
{
"respondent_id_ferc714": 139,
"report_year": 2006,
"utc_offset_code": "PST"
},
{
"respondent_id_ferc714": 235,
"report_year": 2015,
"utc_offset_code": "MST"
},
{
"respondent_id_ferc714": 289,
"report_year": 2011,
"utc_offset_code": "CST"
},
{
"respondent_id_ferc714": 292,
"report_year": 2011,
"utc_offset_code": "CST"
},
]
BAD_RESPONDENTS = [
319,
99991,
99992,
99993,
99994,
99995,
]
"""Fake respondent IDs for database test entities."""
OFFSET_CODES = {
"EST": pd.Timedelta(-5, unit="hours"), # Eastern Standard
"EDT": pd.Timedelta(-5, unit="hours"), # Eastern Daylight
"CST": pd.Timedelta(-6, unit="hours"), # Central Standard
"CDT": pd.Timedelta(-6, unit="hours"), # Central Daylight
"MST": pd.Timedelta(-7, unit="hours"), # Mountain Standard
"MDT": pd.Timedelta(-7, unit="hours"), # Mountain Daylight
"PST": pd.Timedelta(-8, unit="hours"), # Pacific Standard
"PDT": pd.Timedelta(-8, unit="hours"), # Pacific Daylight
"AKST": pd.Timedelta(-9, unit="hours"), # Alaska Standard
"AKDT": pd.Timedelta(-9, unit="hours"), # Alaska Daylight
"HST": pd.Timedelta(-10, unit="hours"), # Hawaii Standard
}
"""
A mapping of timezone offset codes to Timedelta offsets from UTC.
from one year to the next, and these result in duplicate records, which are Note that
the FERC 714 instructions state that all hourly demand is to be reported in STANDARD
time for whatever timezone is being used. Even though many respondents use daylight
savings / standard time abbreviations, a large majority do appear to conform to using a
single UTC offset throughout the year. There are 6 instances in which the timezone
associated with reporting changed dropped.
"""
TZ_CODES = {
"EST": "America/New_York",
"EDT": "America/New_York",
"CST": "America/Chicago",
"CDT": "America/Chicago",
"MST": "America/Denver",
"MDT": "America/Denver",
"PST": "America/Los_Angeles",
"PDT": "America/Los_Angeles",
"AKST": "America/Anchorage",
"AKDT": "America/Anchorage",
"HST": "Pacific/Honolulu",
}
"""Mapping between standardized time offset codes and canonical timezones."""
EIA_CODE_FIXES = {
# FERC 714 Respondent ID: EIA BA or Utility ID
125: 2775, # EIA BA CAISO (fixing bad EIA Code of 229)
134: 5416, # Duke Energy Corp. (bad id was non-existent 3260)
203: 12341, # MidAmerican Energy Co. (fixes typo, from 12431)
257: 59504, # Southwest Power Pool (Fixing bad EIA Coding)
292: 20382, # City of West Memphis -- (fixes a typo, from 20383)
295: 40229, # Old Dominion Electric Cooperative (missing)
301: 14725, # PJM Interconnection Eastern Hub (missing)
302: 14725, # PJM Interconnection Western Hub (missing)
303: 14725, # PJM Interconnection Illinois Hub (missing)
304: 14725, # PJM Interconnection Northern Illinois Hub (missing)
305: 14725, # PJM Interconnection Dominion Hub (missing)
306: 14725, # PJM Interconnection AEP-Dayton Hub (missing)
# PacifiCorp Utility ID is 14354. It ALSO has 2 BA IDs: (14378, 14379)
# See https://github.com/catalyst-cooperative/pudl/issues/616
307: 14379, # Using this ID for now only b/c it's in the HIFLD geometry
309: 12427, # Michigan Power Pool / Power Coordination Center (missing)
315: 56090, # Griffith Energy (bad id was 55124)
323: 58790, # Gridforce Energy Management (missing)
324: 58791, # NaturEner Wind Watch LLC (Fixes bad ID 57995)
329: 39347, # East Texas Electricity Cooperative (missing)
}
"""Overrides of FERC 714 respondent IDs with wrong or missing EIA Codes"""
RENAME_COLS = {
"respondent_id_ferc714": {
"respondent_id": "respondent_id_ferc714",
"respondent_name": "respondent_name_ferc714",
},
"demand_hourly_pa_ferc714": {
"report_yr": "report_year",
"plan_date": "report_date",
"respondent_id": "respondent_id_ferc714",
"timezone": "utc_offset_code",
},
"description_pa_ferc714": {
"report_yr": "report_year",
"respondent_id": "respondent_id_ferc714",
"elec_util_name": "respondent_name_ferc714",
"peak_summer": "peak_demand_summer_mw",
"peak_winter": "peak_demand_winter_mw",
},
"id_certification_ferc714": {
"report_yr": "report_year",
"respondent_id": "respondent_id_ferc714",
},
"gen_plants_ba_ferc714": {
"report_yr": "report_year",
"respondent_id": "respondent_id_ferc714",
},
"demand_monthly_ba_ferc714": {
"report_yr": "report_year",
"respondent_id": "respondent_id_ferc714",
},
"net_energy_load_ba_ferc714": {
"report_yr": "report_year",
"respondent_id": "respondent_id_ferc714",
},
"adjacency_ba_ferc714": {
"report_yr": "report_year",
"respondent_id": "respondent_id_ferc714",
},
"interchange_ba_ferc714": {
"report_yr": "report_year",
"respondent_id": "respondent_id_ferc714",
},
"lambda_hourly_ba_ferc714": {
"report_yr": "report_year",
"respondent_id": "respondent_id_ferc714",
},
"lambda_description_ferc714": {
"report_yr": "report_year",
"respondent_id": "respondent_id_ferc714",
},
"demand_forecast_pa_ferc714": {
"report_yr": "report_year",
"respondent_id": "respondent_id_ferc714",
},
}
##############################################################################
# Internal helper functions.
##############################################################################
def _standardize_offset_codes(df, offset_fixes):
"""
Convert to standardized UTC offset abbreviations.
This function ensures that all of the 3-4 letter abbreviations used to indicate a
timestamp's localized offset from UTC are standardized, so that they can be used to
make the timestamps timezone aware. The standard abbreviations we're using are:
"HST": Hawaii Standard Time
"AKST": Alaska Standard Time
"AKDT": Alaska Daylight Time
"PST": Pacific Standard Time
"PDT": Pacific Daylight Time
"MST": Mountain Standard Time
"MDT": Mountain Daylight Time
"CST": Central Standard Time
"CDT": Central Daylight Time
"EST": Eastern Standard Time
"EDT": Eastern Daylight Time
In some cases different respondents use the same non-standard abbreviations to
indicate different offsets, and so the fixes are applied on a per-respondent basis,
as defined by offset_fixes.
Args:
df (pandas.DataFrame): A DataFrame containing a utc_offset_code column
that needs to be standardized.
offset_fixes (dict): A dictionary with respondent_id_ferc714 values as the
keys, and a dictionary mapping non-standard UTC offset codes to
the standardized UTC offset codes as the value.
Returns:
Standardized UTC offset codes.
"""
logger.debug("Standardizing UTC offset codes.")
# Treat empty string as missing
is_blank = df["utc_offset_code"] == ""
code = df["utc_offset_code"].mask(is_blank)
# Apply specific fixes on a per-respondent basis:
return code.groupby(df['respondent_id_ferc714']).apply(
lambda x: x.replace(offset_fixes[x.name]) if x.name in offset_fixes else x)
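# A minimal example of how the per-respondent fixes are applied (the mapping is
# taken from OFFSET_CODE_FIXES above; respondent 102 is just an illustration):
#     offset_fixes = {102: {"CPT": "CST"}}
# turns every "CPT" code reported by respondent 102 into "CST", while codes from
# all other respondents pass through unchanged.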
def _log_dupes(df, dupe_cols):
"""A macro to report the number of duplicate hours found."""
n_dupes = len(df[df.duplicated(dupe_cols)])
logger.debug(f"Found {n_dupes} duplicated hours.")
def respondent_id(tfr_dfs):
"""
Transform the FERC 714 respondent IDs, names, and EIA utility IDs.
This consists primarily of dropping test respondents and manually assigning EIA
utility IDs to a few FERC Form 714 respondents that report planning area demand, but
which don't have their corresponding EIA utility IDs provided by FERC for some
reason (including PacifiCorp).
Args:
tfr_dfs (dict): A dictionary of (partially) transformed dataframes, to be
cleaned up.
Returns:
dict: The input dictionary of dataframes, but with a finished
respondent_id_ferc714 dataframe.
"""
df = (
tfr_dfs["respondent_id_ferc714"].assign(
respondent_name_ferc714=lambda x: x.respondent_name_ferc714.str.strip(),
eia_code=lambda x: x.eia_code.replace(to_replace=0, value=pd.NA))
        # This excludes fake Test IDs -- they are not real planning areas
.query("respondent_id_ferc714 not in @BAD_RESPONDENTS")
)
# There are a few utilities that seem mappable, but missing:
for rid in EIA_CODE_FIXES:
df.loc[df.respondent_id_ferc714 == rid,
"eia_code"] = EIA_CODE_FIXES[rid]
tfr_dfs["respondent_id_ferc714"] = df
return tfr_dfs
def demand_hourly_pa(tfr_dfs):
"""
Transform the hourly demand time series by Planning Area.
Transformations include:
- Clean UTC offset codes.
- Replace UTC offset codes with UTC offset and timezone.
- Drop 25th hour rows.
- Set records with 0 UTC code to 0 demand.
- Drop duplicate rows.
- Flip negative signs for reported demand.
Args:
tfr_dfs (dict): A dictionary of (partially) transformed dataframes, to be
cleaned up.
Returns:
dict: The input dictionary of dataframes, but with a finished
pa_demand_hourly_ferc714 dataframe.
"""
logger.debug("Converting dates into pandas Datetime types.")
df = tfr_dfs["demand_hourly_pa_ferc714"].copy()
# Parse date strings
# NOTE: Faster to ignore trailing 00:00:00 and use exact=False
df["report_date"] = pd.to_datetime(
df["report_date"], format="%m/%d/%Y", exact=False
)
# Assert that all respondents and years have complete and unique dates
all_dates = {
year: set(pd.date_range(f"{year}-01-01", f"{year}-12-31", freq="1D"))
for year in range(df["report_year"].min(), df["report_year"].max() + 1)
}
assert df.groupby(["respondent_id_ferc714", "report_year"]).apply(
lambda x: set(x["report_date"]) == all_dates[x.name[1]]
).all()
# Clean UTC offset codes
df["utc_offset_code"] = df["utc_offset_code"].str.strip().str.upper()
df["utc_offset_code"] = df.pipe(
_standardize_offset_codes, OFFSET_CODE_FIXES)
# NOTE: Assumes constant timezone for entire year
for fix in OFFSET_CODE_FIXES_BY_YEAR:
mask = (
(df["report_year"] == fix["report_year"]) &
(df["respondent_id_ferc714"] == fix["respondent_id_ferc714"])
)
df.loc[mask, "utc_offset_code"] = fix["utc_offset_code"]
# Replace UTC offset codes with UTC offset and timezone
df["utc_offset"] = df["utc_offset_code"].map(OFFSET_CODES)
df["timezone"] = df["utc_offset_code"].map(TZ_CODES)
df.drop(columns="utc_offset_code", inplace=True)
# Almost all 25th hours are unusable (0.0 or daily totals),
# and they shouldn't really exist at all based on FERC instructions.
df.drop(columns="hour25", inplace=True)
# Melt daily rows with 24 demands to hourly rows with single demand
logger.debug("Melting daily FERC 714 records into hourly records.")
df.rename(
columns=lambda x: int(re.sub(r"^hour", "", x)) -
1 if "hour" in x else x,
inplace=True
)
df = df.melt(
id_vars=[
"respondent_id_ferc714",
"report_year",
"report_date",
"utc_offset",
"timezone",
],
value_vars=range(24),
var_name="hour",
value_name="demand_mwh"
)
# Assert that all records missing UTC offset have zero demand
missing_offset = df["utc_offset"].isna()
assert df.loc[missing_offset, "demand_mwh"].eq(0).all()
# Drop these records
df.query("~@missing_offset", inplace=True)
# Construct UTC datetime
logger.debug("Converting local time + offset code to UTC + timezone.")
hour_timedeltas = {i: pd.to_timedelta(i, unit="h") for i in range(24)}
df["report_date"] += df["hour"].map(hour_timedeltas)
df["utc_datetime"] = df["report_date"] - df["utc_offset"]
df.drop(columns=["hour", "utc_offset"], inplace=True)
# Report and drop duplicated UTC datetimes
    # There should be fewer than 10 of these,
# resulting from changes to a planning area's reporting timezone.
duplicated = df.duplicated(["respondent_id_ferc714", "utc_datetime"])
logger.debug(
f"Found {np.count_nonzero(duplicated)} duplicate UTC datetimes.")
df.query("~@duplicated", inplace=True)
# Flip the sign on sections of demand which were reported as negative
mask = (
(
df["report_year"].isin([2006, 2007, 2008, 2009]) &
(df["respondent_id_ferc714"] == 156)
) |
(
df["report_year"].isin([2006, 2007, 2008, 2009, 2010]) &
(df["respondent_id_ferc714"] == 289)
)
)
df.loc[mask, "demand_mwh"] *= -1
# Convert report_date to first day of year
df["report_date"] = df["report_date"].astype("datetime64[Y]")
# Format result
columns = [
"respondent_id_ferc714",
"report_date",
"utc_datetime",
"timezone",
"demand_mwh"
]
df.drop(columns=set(df.columns) - set(columns), inplace=True)
tfr_dfs["demand_hourly_pa_ferc714"] = df[columns]
return tfr_dfs
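# A minimal pandas sketch (not part of PUDL itself) of the hour-column melt
# performed in demand_hourly_pa() above, using a made-up two-hour frame.
# The column names mirror the real table, but the values are illustrative only.
def _demo_melt_hours():
    demo = pd.DataFrame({
        "respondent_id_ferc714": [101],
        "report_date": [pd.Timestamp("2019-01-01")],
        "hour01": [500.0],
        "hour02": [510.0],
    })
    # "hour01" -> 0, "hour02" -> 1, matching the zero-based hour labels above.
    demo = demo.rename(
        columns=lambda x: int(re.sub(r"^hour", "", x)) - 1 if "hour" in x else x)
    return demo.melt(
        id_vars=["respondent_id_ferc714", "report_date"],
        value_vars=[0, 1],
        var_name="hour",
        value_name="demand_mwh",
    )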
def id_certification(tfr_dfs):
"""A stub transform function."""
return tfr_dfs
def gen_plants_ba(tfr_dfs):
"""A stub transform function."""
return tfr_dfs
def demand_monthly_ba(tfr_dfs):
"""A stub transform function."""
return tfr_dfs
def net_energy_load_ba(tfr_dfs):
"""A stub transform function."""
return tfr_dfs
def adjacency_ba(tfr_dfs):
"""A stub transform function."""
return tfr_dfs
def interchange_ba(tfr_dfs):
"""A stub transform function."""
return tfr_dfs
def lambda_hourly_ba(tfr_dfs):
"""A stub transform function."""
return tfr_dfs
def lambda_description(tfr_dfs):
"""A stub transform function."""
return tfr_dfs
def description_pa(tfr_dfs):
"""A stub transform function."""
return tfr_dfs
def demand_forecast_pa(tfr_dfs):
"""A stub transform function."""
return tfr_dfs
def _early_transform(raw_df):
"""
    A simple transform function to use until the real ones are written.
    * Removes footnote columns (those ending with _f)
* Drops report_prd, spplmnt_num, and row_num columns
* Excludes records which pertain to bad (test) respondents.
"""
logger.debug("Removing unneeded columns and dropping bad respondents.")
out_df = (
raw_df.filter(regex=r"^(?!.*_f$).*")
.drop(["report_prd", "spplmnt_num", "row_num"],
axis="columns", errors="ignore")
.query("respondent_id_ferc714 not in @BAD_RESPONDENTS")
)
return out_df
def transform(raw_dfs, tables=pc.pudl_tables["ferc714"]):
"""
    Transform the raw FERC 714 dataframes into datapackage-ready outputs.
Args:
raw_dfs (dict): A dictionary of raw pandas.DataFrame objects, as read out of
the original FERC 714 CSV files. Generated by the
`pudl.extract.ferc714.extract()` function.
        tables (iterable): The set of PUDL tables within FERC 714 that we should
            process. Typically set to all of them, unless only a subset is
            needed (e.g. during development or testing).
Returns:
dict: A dictionary of pandas.DataFrame objects that are ready to be output in a
data package / database table.
"""
tfr_funcs = {
"respondent_id_ferc714": respondent_id,
"demand_hourly_pa_ferc714": demand_hourly_pa,
# These tables have yet to be fully transformed:
"description_pa_ferc714": description_pa,
"id_certification_ferc714": id_certification,
"gen_plants_ba_ferc714": gen_plants_ba,
"demand_monthly_ba_ferc714": demand_monthly_ba,
"net_energy_load_ba_ferc714": net_energy_load_ba,
"adjacency_ba_ferc714": adjacency_ba,
"interchange_ba_ferc714": interchange_ba,
"lambda_hourly_ba_ferc714": lambda_hourly_ba,
"lambda_description_ferc714": lambda_description,
"demand_forecast_pa_ferc714": demand_forecast_pa,
}
tfr_dfs = {}
for table in tables:
if table not in pc.pudl_tables["ferc714"]:
raise ValueError(
f"No transform function found for requested FERC Form 714 "
f"data table {table}!"
)
logger.info(f"Transforming {table}.")
tfr_dfs[table] = (
raw_dfs[table]
.rename(columns=RENAME_COLS[table])
.pipe(_early_transform)
)
tfr_dfs = tfr_funcs[table](tfr_dfs)
tfr_dfs[table] = (
pudl.helpers.convert_cols_dtypes(tfr_dfs[table], "ferc714", table)
)
return tfr_dfs
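# A minimal sketch (not part of PUDL itself) of the per-table dispatch pattern
# used in transform() above: look up the table-specific function and pass the
# whole dictionary of dataframes through it. The table name and the doubling
# function below are toy stand-ins for the real FERC 714 tables.
def _demo_table_dispatch():
    def _double_demand(dfs):
        dfs["toy_table_ferc714"] = dfs["toy_table_ferc714"].assign(
            demand_mwh=lambda x: x.demand_mwh * 2)
        return dfs
    demo_funcs = {"toy_table_ferc714": _double_demand}
    demo_dfs = {"toy_table_ferc714": pd.DataFrame({"demand_mwh": [1.0, 2.0]})}
    for table, func in demo_funcs.items():
        demo_dfs = func(demo_dfs)
    return demo_dfs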
|
mit
|
RPGOne/Skynet
|
scikit-learn-0.18.1/sklearn/ensemble/weight_boosting.py
|
9
|
40890
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
y_numeric=is_regressor(self))
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float64)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
random_state = check_random_state(self.random_state)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight,
random_state)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
random_state : numpy.RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
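# A minimal numpy sketch (not part of scikit-learn) of the SAMME.R
# pseudo-probability transform computed by _samme_proba() above, applied to a
# toy 2-sample, 3-class probability matrix. The probabilities are made up and
# strictly positive, so the eps clipping used above is not needed here.
def _demo_samme_proba():
    toy_proba = np.array([[0.7, 0.2, 0.1],
                          [0.1, 0.1, 0.8]])
    k = toy_proba.shape[1]
    log_proba = np.log(toy_proba)
    # (K - 1) * (log p_k - mean_k log p_k): the symmetric log-ratio used by
    # SAMME.R; each row sums to zero by construction.
    return (k - 1) * (log_proba - log_proba.mean(axis=1, keepdims=True))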
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
random_state : numpy.RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight, random_state)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight,
random_state)
def _boost_real(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator(random_state=random_state)
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
    estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
random_state : numpy.RandomState
The current random number generator
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator(random_state=random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
        # Ensure the result is an ndarray of indices
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
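# A minimal usage sketch (not part of scikit-learn itself): fit the two
# estimators defined above on small synthetic datasets and inspect a couple of
# results. Dataset sizes and hyperparameters are arbitrary illustrative
# choices; the __main__ guard keeps this from running at import time.
if __name__ == "__main__":  # pragma: no cover
    from sklearn.datasets import make_classification, make_regression

    X_clf, y_clf = make_classification(n_samples=200, n_features=10,
                                       random_state=0)
    clf = AdaBoostClassifier(n_estimators=25, random_state=0)
    clf.fit(X_clf, y_clf)
    # staged_score yields the training accuracy after each boosting round.
    print("final AdaBoost-SAMME.R accuracy:",
          list(clf.staged_score(X_clf, y_clf))[-1])

    X_reg, y_reg = make_regression(n_samples=200, n_features=10,
                                   random_state=0)
    reg = AdaBoostRegressor(n_estimators=25, random_state=0)
    reg.fit(X_reg, y_reg)
    print("first three AdaBoost.R2 predictions:", reg.predict(X_reg[:3]))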
|
bsd-3-clause
|
pratapvardhan/scikit-learn
|
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
|
286
|
2378
|
"""
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
|
bsd-3-clause
|
JorgeDeLosSantos/nusa
|
examples/beam/beam_6_encastre.py
|
1
|
1084
|
# -*- coding: utf-8 -*-
# ***********************************
# Author: Pedro Jorge De Los Santos
# E-mail: [email protected]
# License: MIT License
# ***********************************
import numpy as np
from nusa import *
import itertools
import matplotlib.pyplot as plt
def pairwise(iterable):
#~ "s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
# Input data
E = 29e6 # psi
I = 10
L = 10
P = 10e3
nelm = 10
parts = np.linspace(0, L, nelm + 1)
nodos = []
for xc in parts:
cn = Node((xc,0))
nodos.append(cn)
elementos = []
for x in pairwise(nodos):
ni,nj = x[0], x[1]
ce = Beam((ni,nj),E,I)
elementos.append(ce)
m = BeamModel()
for n in nodos: m.add_node(n)
for e in elementos: m.add_element(e)
m.add_constraint(nodos[0], ux=0, uy=0, ur=0)
m.add_force(nodos[-1], (-P,))
m.solve()
m.plot_disp(1, label="Approx.")
xx = np.linspace(0,L)
d = ((-P*xx**2.0)/(6.0*E*I))*(3*L - xx)
plt.plot(xx, d, label="Classic")
plt.legend()
plt.axis("auto")
plt.xlim(0,L+1)
m.show()
|
mit
|
noddi/panoramix
|
setup.py
|
1
|
1277
|
from setuptools import setup, find_packages
version = '0.5.1'
setup(
name='panoramix',
description=(
"A interactive data visualization platform build on SqlAlchemy "
"and druid.io"),
version=version,
packages=find_packages(),
package_data={'': [
'panoramix/migrations/alembic.ini',
'panoramix/data/birth_names.csv.gz',
]},
include_package_data=True,
zip_safe=False,
scripts=['panoramix/bin/panoramix'],
install_requires=[
'alembic>=0.7.7, <0.8.0',
'flask>=0.10.1, <1.0.0',
'flask-appbuilder>=1.4.5, <2.0.0',
'flask-login==0.2.11',
'flask-migrate>=1.5.1, <2.0.0',
'flask-script>=2.0.5, <3.0.0',
'flask-testing>=0.4.2, <0.5.0',
'gunicorn>=19.3.0, <20.0.0',
'markdown>=2.6.2, <3.0.0',
'pandas==0.16.2, <0.17',
'parsedatetime>=1.5, <2.0.0',
'pydruid>=0.2.2, <0.3',
'python-dateutil>=2.4.2, <3.0.0',
'requests>=2.7.0, <3.0.0',
'sqlparse>=0.1.16, <0.2.0',
],
author='Maxime Beauchemin',
author_email='[email protected]',
url='https://github.com/mistercrunch/panoramix',
download_url=(
'https://github.com/mistercrunch/panoramix/tarball/' + version),
)
|
apache-2.0
|
cseed/hail
|
hail/python/hail/methods/statgen.py
|
1
|
129761
|
import itertools
import math
import numpy as np
from typing import Dict, Callable
import builtins
import hail
import hail as hl
import hail.expr.aggregators as agg
from hail.expr import (Expression, ExpressionException, expr_float64, expr_call,
expr_any, expr_numeric, expr_locus, analyze, check_entry_indexed,
check_row_indexed, matrix_table_source, table_source)
from hail.expr.types import tbool, tarray, tfloat64, tint32
from hail import ir
from hail.genetics.reference_genome import reference_genome_type
from hail.linalg import BlockMatrix
from hail.matrixtable import MatrixTable
from hail.methods.misc import require_biallelic, require_row_key_variant
from hail.stats import LinearMixedModel
from hail.table import Table
from hail.typecheck import (typecheck, nullable, numeric, oneof, sequenceof,
enumeration, anytype)
from hail.utils import wrap_to_list, new_temp_file, FatalError
from hail.utils.java import Env, info, warning
from . import relatedness
from . import pca
from ..backend.spark_backend import SparkBackend
pc_relate = relatedness.pc_relate
identity_by_descent = relatedness.identity_by_descent
_blanczos_pca = pca._blanczos_pca
_hwe_normalized_blanczos = pca._hwe_normalized_blanczos
hwe_normalized_pca = pca.hwe_normalized_pca
pca = pca.pca
@typecheck(call=expr_call,
aaf_threshold=numeric,
include_par=bool,
female_threshold=numeric,
male_threshold=numeric,
aaf=nullable(str))
def impute_sex(call, aaf_threshold=0.0, include_par=False, female_threshold=0.2, male_threshold=0.8, aaf=None) -> Table:
r"""Impute sex of samples by calculating inbreeding coefficient on the
X chromosome.
.. include:: ../_templates/req_tvariant.rst
.. include:: ../_templates/req_biallelic.rst
Examples
--------
Remove samples where imputed sex does not equal reported sex:
>>> imputed_sex = hl.impute_sex(dataset.GT)
>>> dataset_result = dataset.filter_cols(imputed_sex[dataset.s].is_female != dataset.pheno.is_female,
... keep=False)
Notes
-----
We have used the same implementation as `PLINK v1.7
<https://zzz.bwh.harvard.edu/plink/summary.shtml#sexcheck>`__.
    Let `gr` be the reference genome of the type of the `locus` key (as
given by :attr:`.tlocus.reference_genome`)
1. Filter the dataset to loci on the X contig defined by `gr`.
2. Calculate alternate allele frequency (AAF) for each row from the dataset.
3. Filter to variants with AAF above `aaf_threshold`.
4. Remove loci in the pseudoautosomal region, as defined by `gr`, unless
`include_par` is ``True`` (it defaults to ``False``)
5. For each row and column with a non-missing genotype call, :math:`E`, the
expected number of homozygotes (from population AAF), is computed as
:math:`1.0 - (2.0*\mathrm{maf}*(1.0-\mathrm{maf}))`.
6. For each row and column with a non-missing genotype call, :math:`O`, the
observed number of homozygotes, is computed interpreting ``0`` as
       heterozygote and ``1`` as homozygote.
7. For each row and column with a non-missing genotype call, :math:`N` is
incremented by 1
8. For each column, :math:`E`, :math:`O`, and :math:`N` are combined across
variants
9. For each column, :math:`F` is calculated by :math:`(O - E) / (N - E)`
10. A sex is assigned to each sample with the following criteria:
- Female when ``F < 0.2``
- Male when ``F > 0.8``
Use `female_threshold` and `male_threshold` to change this behavior.
**Annotations**
The returned column-key indexed :class:`.Table` has the following fields in
addition to the matrix table's column keys:
- **is_female** (:py:data:`.tbool`) -- True if the imputed sex is female,
false if male, missing if undetermined.
- **f_stat** (:py:data:`.tfloat64`) -- Inbreeding coefficient.
- **n_called** (:py:data:`.tint64`) -- Number of variants with a genotype call.
- **expected_homs** (:py:data:`.tfloat64`) -- Expected number of homozygotes.
- **observed_homs** (:py:data:`.tint64`) -- Observed number of homozygotes.
    Parameters
    ----------
    call : :class:`.CallExpression`
A genotype call for each row and column. The source dataset's row keys
must be [[locus], alleles] with types :class:`.tlocus` and
:class:`.tarray` of :obj:`.tstr`. Moreover, the alleles array must have
exactly two elements (i.e. the variant must be biallelic).
aaf_threshold : :obj:`float`
Minimum alternate allele frequency threshold.
include_par : :obj:`bool`
Include pseudoautosomal regions.
female_threshold : :obj:`float`
Samples are called females if F < female_threshold.
male_threshold : :obj:`float`
Samples are called males if F > male_threshold.
aaf : :class:`str` or :obj:`None`
A field defining the alternate allele frequency for each row. If
``None``, AAF will be computed from `call`.
    Returns
    -------
:class:`.Table`
Sex imputation statistics per sample.
"""
if aaf_threshold < 0.0 or aaf_threshold > 1.0:
raise FatalError("Invalid argument for `aaf_threshold`. Must be in range [0, 1].")
mt = call._indices.source
mt, _ = mt._process_joins(call)
mt = mt.annotate_entries(call=call)
mt = require_biallelic(mt, 'impute_sex')
if (aaf is None):
mt = mt.annotate_rows(aaf=agg.call_stats(mt.call, mt.alleles).AF[1])
aaf = 'aaf'
rg = mt.locus.dtype.reference_genome
mt = hl.filter_intervals(mt,
hl.map(lambda x_contig: hl.parse_locus_interval(x_contig, rg), rg.x_contigs),
keep=True)
if not include_par:
interval_type = hl.tarray(hl.tinterval(hl.tlocus(rg)))
mt = hl.filter_intervals(mt,
hl.literal(rg.par, interval_type),
keep=False)
mt = mt.filter_rows((mt[aaf] > aaf_threshold) & (mt[aaf] < (1 - aaf_threshold)))
mt = mt.annotate_cols(ib=agg.inbreeding(mt.call, mt[aaf]))
kt = mt.select_cols(
is_female=hl.cond(mt.ib.f_stat < female_threshold,
True,
hl.cond(mt.ib.f_stat > male_threshold,
False,
hl.null(tbool))),
**mt.ib).cols()
return kt
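# A minimal sketch (not part of Hail itself) of the inbreeding-coefficient
# arithmetic documented above: F = (O - E) / (N - E), compared against the
# default 0.2 / 0.8 thresholds. The observed/expected counts are made up.
def _demo_f_stat(observed_homs=85, expected_homs=80.0, n_called=100):
    f_stat = (observed_homs - expected_homs) / (n_called - expected_homs)
    if f_stat < 0.2:
        return f_stat, True    # imputed female
    if f_stat > 0.8:
        return f_stat, False   # imputed male
    return f_stat, None        # undetermined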
def _get_regression_row_fields(mt, pass_through, method) -> Dict[str, str]:
row_fields = dict(zip(mt.row_key.keys(), mt.row_key.keys()))
for f in pass_through:
if isinstance(f, str):
if f not in mt.row:
raise ValueError(f"'{method}/pass_through': MatrixTable has no row field {repr(f)}")
if f in row_fields:
# allow silent pass through of key fields
if f in mt.row_key:
pass
else:
raise ValueError(f"'{method}/pass_through': found duplicated field {repr(f)}")
row_fields[f] = mt[f]
else:
assert isinstance(f, Expression)
if not f._ir.is_nested_field:
raise ValueError(f"'{method}/pass_through': expect fields or nested fields, not complex expressions")
if not f._indices == mt._row_indices:
raise ExpressionException(f"'{method}/pass_through': require row-indexed fields, found indices {f._indices.axes}")
name = f._ir.name
if name in row_fields:
# allow silent pass through of key fields
if not (name in mt.row_key and f._ir == mt[name]._ir):
raise ValueError(f"'{method}/pass_through': found duplicated field {repr(name)}")
row_fields[name] = f
for k in mt.row_key:
del row_fields[k]
return row_fields
@typecheck(y=oneof(expr_float64, sequenceof(expr_float64), sequenceof(sequenceof(expr_float64))),
x=expr_float64,
covariates=sequenceof(expr_float64),
block_size=int,
pass_through=sequenceof(oneof(str, Expression)))
def linear_regression_rows(y, x, covariates, block_size=16, pass_through=()) -> hail.Table:
r"""For each row, test an input variable for association with
response variables using linear regression.
Examples
--------
>>> result_ht = hl.linear_regression_rows(
... y=dataset.pheno.height,
... x=dataset.GT.n_alt_alleles(),
... covariates=[1, dataset.pheno.age, dataset.pheno.is_female])
Warning
-------
As in the example, the intercept covariate ``1`` must be
included **explicitly** if desired.
Warning
-------
If `y` is a single value or a list, :func:`.linear_regression_rows`
considers the same set of columns (i.e., samples, points) for every response
variable and row, namely those columns for which **all** response variables
and covariates are defined.
If `y` is a list of lists, then each inner list is treated as an
independent group, subsetting columns for missingness separately.
Notes
-----
With the default root and `y` a single expression, the following row-indexed
fields are added.
- **<row key fields>** (Any) -- Row key fields.
- **<pass_through fields>** (Any) -- Row fields in `pass_through`.
- **n** (:py:data:`.tint32`) -- Number of columns used.
- **sum_x** (:py:data:`.tfloat64`) -- Sum of input values `x`.
- **y_transpose_x** (:py:data:`.tfloat64`) -- Dot product of response
vector `y` with the input vector `x`.
- **beta** (:py:data:`.tfloat64`) --
Fit effect coefficient of `x`, :math:`\hat\beta_1` below.
- **standard_error** (:py:data:`.tfloat64`) --
Estimated standard error, :math:`\widehat{\mathrm{se}}_1`.
- **t_stat** (:py:data:`.tfloat64`) -- :math:`t`-statistic, equal to
:math:`\hat\beta_1 / \widehat{\mathrm{se}}_1`.
- **p_value** (:py:data:`.tfloat64`) -- :math:`p`-value.
If `y` is a list of expressions, then the last five fields instead have type
:class:`.tarray` of :py:data:`.tfloat64`, with corresponding indexing of
the list and each array.
If `y` is a list of lists of expressions, then `n` and `sum_x` are of type
``array<float64>``, and the last five fields are of type
``array<array<float64>>``. Index into these arrays with
``a[index_in_outer_list, index_in_inner_list]``. For example, if
``y=[[a], [b, c]]`` then the p-value for ``b`` is ``p_value[1][0]``.
In the statistical genetics example above, the input variable `x` encodes
genotype as the number of alternate alleles (0, 1, or 2). For each variant
(row), genotype is tested for association with height controlling for age
and sex, by fitting the linear regression model:
.. math::
\mathrm{height} = \beta_0 + \beta_1 \, \mathrm{genotype}
+ \beta_2 \, \mathrm{age}
+ \beta_3 \, \mathrm{is\_female}
+ \varepsilon,
\quad
\varepsilon \sim \mathrm{N}(0, \sigma^2)
Boolean covariates like :math:`\mathrm{is\_female}` are encoded as 1 for
``True`` and 0 for ``False``. The null model sets :math:`\beta_1 = 0`.
The standard least-squares linear regression model is derived in Section
3.2 of `The Elements of Statistical Learning, 2nd Edition
<http://statweb.stanford.edu/~tibs/ElemStatLearn/printings/ESLII_print10.pdf>`__.
See equation 3.12 for the t-statistic which follows the t-distribution with
:math:`n - k - 1` degrees of freedom, under the null hypothesis of no
effect, with :math:`n` samples and :math:`k` covariates in addition to
``x``.
Note
----
    Use the `pass_through` parameter to include additional row fields from
    the matrix table underlying ``x``. For example, to include an "rsid" field, set
``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.
Parameters
----------
y : :class:`.Float64Expression` or :obj:`list` of :class:`.Float64Expression`
One or more column-indexed response expressions.
x : :class:`.Float64Expression`
Entry-indexed expression for input variable.
covariates : :obj:`list` of :class:`.Float64Expression`
List of column-indexed covariate expressions.
block_size : :obj:`int`
Number of row regressions to perform simultaneously per core. Larger blocks
require more memory but may improve performance.
pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
Additional row fields to include in the resulting table.
Returns
-------
:class:`.Table`
"""
if not isinstance(Env.backend(), SparkBackend):
return _linear_regression_rows_nd(y, x, covariates, block_size, pass_through)
mt = matrix_table_source('linear_regression_rows/x', x)
check_entry_indexed('linear_regression_rows/x', x)
y_is_list = isinstance(y, list)
if y_is_list and len(y) == 0:
raise ValueError("'linear_regression_rows': found no values for 'y'")
is_chained = y_is_list and isinstance(y[0], list)
if is_chained and any(len(lst) == 0 for lst in y):
raise ValueError("'linear_regression_rows': found empty inner list for 'y'")
y = wrap_to_list(y)
for e in (itertools.chain.from_iterable(y) if is_chained else y):
analyze('linear_regression_rows/y', e, mt._col_indices)
for e in covariates:
analyze('linear_regression_rows/covariates', e, mt._col_indices)
_warn_if_no_intercept('linear_regression_rows', covariates)
x_field_name = Env.get_uid()
if is_chained:
y_field_names = [[f'__y_{i}_{j}' for j in range(len(y[i]))] for i in range(len(y))]
y_dict = dict(zip(itertools.chain.from_iterable(y_field_names), itertools.chain.from_iterable(y)))
func = 'LinearRegressionRowsChained'
else:
y_field_names = list(f'__y_{i}' for i in range(len(y)))
y_dict = dict(zip(y_field_names, y))
func = 'LinearRegressionRowsSingle'
cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
row_fields = _get_regression_row_fields(mt, pass_through, 'linear_regression_rows')
# FIXME: selecting an existing entry field should be emitted as a SelectFields
mt = mt._select_all(col_exprs=dict(**y_dict,
**dict(zip(cov_field_names, covariates))),
row_exprs=row_fields,
col_key=[],
entry_exprs={x_field_name: x})
config = {
'name': func,
'yFields': y_field_names,
'xField': x_field_name,
'covFields': cov_field_names,
'rowBlockSize': block_size,
'passThrough': [x for x in row_fields if x not in mt.row_key]
}
ht_result = Table(ir.MatrixToTableApply(mt._mir, config))
if not y_is_list:
fields = ['y_transpose_x', 'beta', 'standard_error', 't_stat', 'p_value']
ht_result = ht_result.annotate(**{f: ht_result[f][0] for f in fields})
return ht_result.persist()
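# A minimal numpy-only sketch (not Hail's implementation) of the per-variant
# statistics documented above: the fitted effect, its standard error, the
# t-statistic, and the residual degrees of freedom n - k - 1. It assumes a
# full-rank design with more samples than columns; all inputs are hypothetical.
def _demo_single_variant_ols(x, y, covariates):
    # Design matrix: the covariates (including an explicit intercept column)
    # followed by the genotype/input variable x as the last column.
    design = np.column_stack(list(covariates) + [x])
    n, p = design.shape
    beta, rss, _, _ = np.linalg.lstsq(design, y, rcond=None)
    dof = n - p  # equals n - k - 1 with k covariates plus the single x
    sigma2 = rss[0] / dof
    se = np.sqrt(sigma2 * np.linalg.inv(design.T @ design).diagonal())
    t_stat = beta[-1] / se[-1]  # statistic for the x column
    return beta[-1], se[-1], t_stat, dof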
@typecheck(y=oneof(expr_float64, sequenceof(expr_float64), sequenceof(sequenceof(expr_float64))),
x=expr_float64,
covariates=sequenceof(expr_float64),
block_size=int,
pass_through=sequenceof(oneof(str, Expression)))
def _linear_regression_rows_nd(y, x, covariates, block_size=16, pass_through=()) -> hail.Table:
mt = matrix_table_source('linear_regression_rows_nd/x', x)
check_entry_indexed('linear_regression_rows_nd/x', x)
y_is_list = isinstance(y, list)
if y_is_list and len(y) == 0:
raise ValueError("'linear_regression_rows_nd': found no values for 'y'")
is_chained = y_is_list and isinstance(y[0], list)
if is_chained and any(len(lst) == 0 for lst in y):
raise ValueError("'linear_regression_rows': found empty inner list for 'y'")
y = wrap_to_list(y)
for e in (itertools.chain.from_iterable(y) if is_chained else y):
analyze('linear_regression_rows_nd/y', e, mt._col_indices)
for e in covariates:
analyze('linear_regression_rows_nd/covariates', e, mt._col_indices)
_warn_if_no_intercept('linear_regression_rows_nd', covariates)
x_field_name = Env.get_uid()
if is_chained:
y_field_names = [[f'__y_{i}_{j}' for j in range(len(y[i]))] for i in range(len(y))]
y_dict = dict(zip(itertools.chain.from_iterable(y_field_names), itertools.chain.from_iterable(y)))
else:
y_field_names = list(f'__y_{i}' for i in range(len(y)))
y_dict = dict(zip(y_field_names, y))
cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
row_field_names = _get_regression_row_fields(mt, pass_through, 'linear_regression_rows_nd')
# FIXME: selecting an existing entry field should be emitted as a SelectFields
mt = mt._select_all(col_exprs=dict(**y_dict,
**dict(zip(cov_field_names, covariates))),
row_exprs=row_field_names,
col_key=[],
entry_exprs={x_field_name: x})
entries_field_name = 'ent'
sample_field_name = "by_sample"
if not is_chained:
y_field_names = [y_field_names]
num_y_lists = len(y_field_names)
def all_defined(struct_root, field_names):
defined_array = hl.array([hl.is_defined(struct_root[field_name]) for field_name in field_names])
return defined_array.all(lambda a: a)
# Given a hail array, get the mean of the nonmissing entries and
# return new array where the missing entries are the mean.
def mean_impute(hl_array):
non_missing_mean = hl.mean(hl_array, filter_missing=True)
return hl_array.map(lambda entry: hl.if_else(hl.is_defined(entry), entry, non_missing_mean))
def select_array_indices(hl_array, indices):
return indices.map(lambda i: hl_array[i])
def dot_rows_with_themselves(matrix):
return (matrix * matrix) @ hl.nd.ones(matrix.shape[1])
def array_from_struct(struct, field_names):
return hl.array([struct[field_name] for field_name in field_names])
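    # Localize entries into a row-indexed array field so the per-row regression can be
    # expressed with Hail ndarray operations in Table context.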
ht_local = mt._localize_entries(entries_field_name, sample_field_name)
ht = ht_local.transmute(**{entries_field_name: ht_local[entries_field_name][x_field_name]})
list_of_ys_and_covs_to_keep_with_indices = \
[hl.enumerate(ht[sample_field_name]).filter(lambda struct_with_index: all_defined(struct_with_index[1], one_y_field_name_set + cov_field_names)) for one_y_field_name_set in y_field_names]
def make_one_cov_matrix(ys_and_covs_to_keep):
return hl.nd.array(ys_and_covs_to_keep.map(lambda struct: array_from_struct(struct, cov_field_names))) \
if cov_field_names else hl.nd.zeros((hl.len(ys_and_covs_to_keep), 0))
def make_one_y_matrix(ys_and_covs_to_keep, one_y_field_name_set):
return hl.nd.array(ys_and_covs_to_keep.map(lambda struct: array_from_struct(struct, one_y_field_name_set)))
list_of_ys_and_covs_to_keep = [inner_list.map(lambda pair: pair[1]) for inner_list in list_of_ys_and_covs_to_keep_with_indices]
list_of_indices_to_keep = [inner_list.map(lambda pair: pair[0]) for inner_list in list_of_ys_and_covs_to_keep_with_indices]
cov_nds = hl.array([make_one_cov_matrix(ys_and_covs_to_keep) for ys_and_covs_to_keep in list_of_ys_and_covs_to_keep])
y_nds = hl.array([make_one_y_matrix(ys_and_covs_to_keep, one_y_field_name_set)
for ys_and_covs_to_keep, one_y_field_name_set in zip(list_of_ys_and_covs_to_keep, y_field_names)])
ht = ht.annotate_globals(kept_samples=list_of_indices_to_keep)
k = builtins.len(covariates)
ns = ht.index_globals().kept_samples.map(lambda one_sample_set: hl.len(one_sample_set))
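    # Orthonormalize the covariates for each y group via QR; Qt (shape k x n) projects onto
    # the covariate column space, or is an empty (0, n) matrix when there are no covariates.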
cov_Qts = hl.if_else(k > 0,
cov_nds.map(lambda one_cov_nd: hl.nd.qr(one_cov_nd)[0].T),
ns.map(lambda n: hl.nd.zeros((0, n))))
Qtys = hl.range(num_y_lists).map(lambda i: cov_Qts[i] @ hl.array(y_nds)[i])
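    # Cache per-group quantities as globals: residual degrees of freedom (n - k - 1) and the
    # sum of squares of each y after projecting out the covariates (__yyps).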
ht = ht.annotate_globals(
__y_nds=y_nds,
ds=ns.map(lambda n: n - k - 1),
__cov_Qts=cov_Qts,
__Qtys=Qtys,
__yyps=hl.range(num_y_lists).map(lambda i: dot_rows_with_themselves(y_nds[i].T) - dot_rows_with_themselves(Qtys[i].T)))
def process_block(block):
rows_in_block = hl.len(block)
# Processes one block group based on given idx. Returns a single struct.
def process_y_group(idx):
X = hl.nd.array(block[entries_field_name].map(lambda row: mean_impute(select_array_indices(row, ht.kept_samples[idx])))).T
n = ns[idx]
sum_x = (X.T @ hl.nd.ones((n,)))
Qtx = ht.__cov_Qts[idx] @ X
ytx = ht.__y_nds[idx].T @ X
xyp = ytx - (ht.__Qtys[idx].T @ Qtx)
xxpRec = (dot_rows_with_themselves(X.T) - dot_rows_with_themselves(Qtx.T)).map(lambda entry: 1 / entry)
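            # Effect sizes, standard errors, t-statistics, and two-sided p-values per variant.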
b = xyp * xxpRec
se = ((1.0 / ht.ds[idx]) * (ht.__yyps[idx].reshape((-1, 1)) @ xxpRec.reshape((1, -1)) - (b * b))).map(lambda entry: hl.sqrt(entry))
t = b / se
p = t.map(lambda entry: 2 * hl.expr.functions.pT(-hl.abs(entry), ht.ds[idx], True, False))
return hl.struct(n=hl.range(rows_in_block).map(lambda i: n), sum_x=sum_x._data_array(), y_transpose_x=ytx.T._data_array(), beta=b.T._data_array(),
standard_error=se.T._data_array(), t_stat=t.T._data_array(), p_value=p.T._data_array())
per_y_list = hl.range(num_y_lists).map(lambda i: process_y_group(i))
key_field_names = [key_field for key_field in ht.key]
def build_row(row_idx):
# For every field we care about, map across all y's, getting the row_idxth one from each.
idxth_keys = {field_name: block[field_name][row_idx] for field_name in key_field_names}
computed_row_field_names = ['n', 'sum_x', 'y_transpose_x', 'beta', 'standard_error', 't_stat', 'p_value']
computed_row_fields = {
field_name: per_y_list.map(lambda one_y: one_y[field_name][row_idx]) for field_name in computed_row_field_names
}
pass_through_rows = {
field_name: block[field_name][row_idx] for field_name in row_field_names
}
if not is_chained:
computed_row_fields = {key: value[0] for key, value in computed_row_fields.items()}
return hl.struct(**{**idxth_keys, **computed_row_fields, **pass_through_rows})
new_rows = hl.range(rows_in_block).map(build_row)
return new_rows
def process_partition(part):
grouped = part.grouped(block_size)
return grouped.flatmap(lambda block: process_block(block))
res = ht._map_partitions(process_partition)
if not y_is_list:
fields = ['y_transpose_x', 'beta', 'standard_error', 't_stat', 'p_value']
res = res.annotate(**{f: res[f][0] for f in fields})
res = res.select_globals()
return res
@typecheck(test=enumeration('wald', 'lrt', 'score', 'firth'),
y=oneof(expr_float64, sequenceof(expr_float64), sequenceof(sequenceof(expr_float64))),
x=expr_float64,
covariates=sequenceof(expr_float64),
pass_through=sequenceof(oneof(str, Expression)))
def logistic_regression_rows(test, y, x, covariates, pass_through=()) -> hail.Table:
r"""For each row, test an input variable for association with a
binary response variable using logistic regression.
Examples
--------
Run the logistic regression Wald test per variant using a Boolean
phenotype, intercept and two covariates stored in column-indexed
fields:
>>> result_ht = hl.logistic_regression_rows(
... test='wald',
... y=dataset.pheno.is_case,
... x=dataset.GT.n_alt_alleles(),
... covariates=[1, dataset.pheno.age, dataset.pheno.is_female])
Run the logistic regression Wald test per variant using a list of binary (0/1)
phenotypes, intercept and two covariates stored in column-indexed
fields:
>>> result_ht = hl.logistic_regression_rows(
... test='wald',
... y=[dataset.pheno.is_case, dataset.pheno.is_case], # where pheno values are 0, 1, or missing
... x=dataset.GT.n_alt_alleles(),
... covariates=[1, dataset.pheno.age, dataset.pheno.is_female])
Warning
-------
:func:`.logistic_regression_rows` considers the same set of
columns (i.e., samples, points) for every row, namely those columns for
which **all** response variables and covariates are defined. For each row, missing values of
`x` are mean-imputed over these columns. As in the example, the
intercept covariate ``1`` must be included **explicitly** if desired.
Notes
-----
This method performs, for each row, a significance test of the input
variable in predicting a binary (case-control) response variable based
on the logistic regression model. The response variable type must either
be numeric (with all present values 0 or 1) or Boolean, in which case
true and false are coded as 1 and 0, respectively.
Hail supports the Wald test ('wald'), likelihood ratio test ('lrt'),
Rao score test ('score'), and Firth test ('firth'). Hail only includes
columns for which the response variable and all covariates are defined.
For each row, Hail imputes missing input values as the mean of the
non-missing values.
The example above considers a model of the form
.. math::
\mathrm{Prob}(\mathrm{is\_case}) =
\mathrm{sigmoid}(\beta_0 + \beta_1 \, \mathrm{gt}
+ \beta_2 \, \mathrm{age}
+ \beta_3 \, \mathrm{is\_female} + \varepsilon),
\quad
\varepsilon \sim \mathrm{N}(0, \sigma^2)
where :math:`\mathrm{sigmoid}` is the `sigmoid function`_, the genotype
:math:`\mathrm{gt}` is coded as 0 for HomRef, 1 for Het, and 2 for
HomVar, and the Boolean covariate :math:`\mathrm{is\_female}` is coded as
    1 for ``True`` (female) and 0 for ``False`` (male). The null model sets
:math:`\beta_1 = 0`.
.. _sigmoid function: https://en.wikipedia.org/wiki/Sigmoid_function
The structure of the emitted row field depends on the test statistic as
shown in the tables below.
========== ================== ======= ============================================
Test Field Type Value
========== ================== ======= ============================================
Wald `beta` float64 fit effect coefficient,
:math:`\hat\beta_1`
Wald `standard_error` float64 estimated standard error,
:math:`\widehat{\mathrm{se}}`
Wald `z_stat` float64 Wald :math:`z`-statistic, equal to
:math:`\hat\beta_1 / \widehat{\mathrm{se}}`
Wald `p_value` float64 Wald p-value testing :math:`\beta_1 = 0`
LRT, Firth `beta` float64 fit effect coefficient,
:math:`\hat\beta_1`
LRT, Firth `chi_sq_stat` float64 deviance statistic
LRT, Firth `p_value` float64 LRT / Firth p-value testing
:math:`\beta_1 = 0`
Score `chi_sq_stat` float64 score statistic
Score `p_value` float64 score p-value testing :math:`\beta_1 = 0`
========== ================== ======= ============================================
For the Wald and likelihood ratio tests, Hail fits the logistic model for
each row using Newton iteration and only emits the above fields
when the maximum likelihood estimate of the coefficients converges. The
Firth test uses a modified form of Newton iteration. To help diagnose
convergence issues, Hail also emits three fields which summarize the
iterative fitting process:
================ =================== ======= ===============================
Test Field Type Value
================ =================== ======= ===============================
Wald, LRT, Firth `fit.n_iterations` int32 number of iterations until
convergence, explosion, or
reaching the max (25 for
Wald, LRT; 100 for Firth)
Wald, LRT, Firth `fit.converged` bool ``True`` if iteration converged
Wald, LRT, Firth `fit.exploded` bool ``True`` if iteration exploded
================ =================== ======= ===============================
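    For example (a sketch, reusing the Wald `result_ht` from the examples above),
    rows whose fit did not converge can be inspected directly:
    >>> result_ht.filter(~result_ht.fit.converged).show()  # doctest: +SKIP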
We consider iteration to have converged when every coordinate of
:math:`\beta` changes by less than :math:`10^{-6}`. For Wald and LRT,
up to 25 iterations are attempted; in testing we find 4 or 5 iterations
nearly always suffice. Convergence may also fail due to explosion,
which refers to low-level numerical linear algebra exceptions caused by
manipulating ill-conditioned matrices. Explosion may result from (nearly)
linearly dependent covariates or complete separation_.
.. _separation: https://en.wikipedia.org/wiki/Separation_(statistics)
    A more common situation in genetics is quasi-complete separation, e.g.
variants that are observed only in cases (or controls). Such variants
inevitably arise when testing millions of variants with very low minor
allele count. The maximum likelihood estimate of :math:`\beta` under
logistic regression is then undefined but convergence may still occur
after a large number of iterations due to a very flat likelihood
surface. In testing, we find that such variants produce a secondary bump
from 10 to 15 iterations in the histogram of number of iterations per
variant. We also find that this faux convergence produces large standard
errors and large (insignificant) p-values. To not miss such variants,
consider using Firth logistic regression, linear regression, or
group-based tests.
    Here's a concrete illustration of quasi-complete separation in R. Suppose
we have 2010 samples distributed as follows for a particular variant:
======= ====== === ======
Status HomRef Het HomVar
======= ====== === ======
Case 1000 10 0
Control 1000 0 0
======= ====== === ======
The following R code fits the (standard) logistic, Firth logistic,
and linear regression models to this data, where ``x`` is genotype,
``y`` is phenotype, and ``logistf`` is from the logistf package:
.. code-block:: R
        x <- c(rep(0,1000), rep(0,1000), rep(1,10))
        y <- c(rep(1,1000), rep(0,1000), rep(1,10))
logfit <- glm(y ~ x, family=binomial())
firthfit <- logistf(y ~ x)
linfit <- lm(y ~ x)
The resulting p-values for the genotype coefficient are 0.991, 0.00085,
and 0.0016, respectively. The erroneous value 0.991 is due to
quasi-complete separation. Moving one of the 10 hets from case to control
eliminates this quasi-complete separation; the p-values from R are then
0.0373, 0.0111, and 0.0116, respectively, as expected for a less
significant association.
The Firth test reduces bias from small counts and resolves the issue of
    separation by penalizing maximum likelihood estimation by the `Jeffreys
invariant prior <https://en.wikipedia.org/wiki/Jeffreys_prior>`__. This
test is slower, as both the null and full model must be fit per variant,
and convergence of the modified Newton method is linear rather than
quadratic. For Firth, 100 iterations are attempted for the null model
and, if that is successful, for the full model as well. In testing we
find 20 iterations nearly always suffices. If the null model fails to
    converge, then the `fit` fields reflect the null model;
otherwise, they reflect the full model.
See
`Recommended joint and meta-analysis strategies for case-control association testing of single low-count variants <http://www.ncbi.nlm.nih.gov/pmc/articles/PMC4049324/>`__
for an empirical comparison of the logistic Wald, LRT, score, and Firth
tests. The theoretical foundations of the Wald, likelihood ratio, and score
tests may be found in Chapter 3 of Gesine Reinert's notes
`Statistical Theory <http://www.stats.ox.ac.uk/~reinert/stattheory/theoryshort09.pdf>`__.
Firth introduced his approach in
`Bias reduction of maximum likelihood estimates, 1993 <http://www2.stat.duke.edu/~scs/Courses/Stat376/Papers/GibbsFieldEst/BiasReductionMLE.pdf>`__.
Heinze and Schemper further analyze Firth's approach in
`A solution to the problem of separation in logistic regression, 2002 <https://cemsiis.meduniwien.ac.at/fileadmin/msi_akim/CeMSIIS/KB/volltexte/Heinze_Schemper_2002_Statistics_in_Medicine.pdf>`__.
Hail's logistic regression tests correspond to the ``b.wald``,
``b.lrt``, and ``b.score`` tests in `EPACTS`_. For each variant, Hail
imputes missing input values as the mean of non-missing input values,
whereas EPACTS subsets to those samples with called genotypes. Hence,
Hail and EPACTS results will currently only agree for variants with no
missing genotypes.
.. _EPACTS: http://genome.sph.umich.edu/wiki/EPACTS#Single_Variant_Tests
Note
----
Use the `pass_through` parameter to include additional row fields from
matrix table underlying ``x``. For example, to include an "rsid" field, set
``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.
Parameters
----------
test : {'wald', 'lrt', 'score', 'firth'}
Statistical test.
y : :class:`.Float64Expression` or :obj:`list` of :class:`.Float64Expression`
One or more column-indexed response expressions.
All non-missing values must evaluate to 0 or 1.
Note that a :class:`.BooleanExpression` will be implicitly converted to
a :class:`.Float64Expression` with this property.
x : :class:`.Float64Expression`
Entry-indexed expression for input variable.
covariates : :obj:`list` of :class:`.Float64Expression`
Non-empty list of column-indexed covariate expressions.
pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
Additional row fields to include in the resulting table.
Returns
-------
:class:`.Table`
"""
if len(covariates) == 0:
raise ValueError('logistic regression requires at least one covariate expression')
    mt = matrix_table_source('logistic_regression_rows/x', x)
    check_entry_indexed('logistic_regression_rows/x', x)
y_is_list = isinstance(y, list)
if y_is_list and len(y) == 0:
raise ValueError("'logistic_regression_rows': found no values for 'y'")
y = wrap_to_list(y)
for e in covariates:
analyze('logistic_regression_rows/covariates', e, mt._col_indices)
_warn_if_no_intercept('logistic_regression_rows', covariates)
x_field_name = Env.get_uid()
y_field = [f'__y_{i}' for i in range(len(y))]
y_dict = dict(zip(y_field, y))
cov_field_names = [f'__cov{i}' for i in range(len(covariates))]
row_fields = _get_regression_row_fields(mt, pass_through, 'logistic_regression_rows')
# FIXME: selecting an existing entry field should be emitted as a SelectFields
mt = mt._select_all(col_exprs=dict(**y_dict,
**dict(zip(cov_field_names, covariates))),
row_exprs=row_fields,
col_key=[],
entry_exprs={x_field_name: x})
config = {
'name': 'LogisticRegression',
'test': test,
'yFields': y_field,
'xField': x_field_name,
'covFields': cov_field_names,
'passThrough': [x for x in row_fields if x not in mt.row_key]
}
result = Table(ir.MatrixToTableApply(mt._mir, config))
if not y_is_list:
result = result.transmute(**result.logistic_regression[0])
return result.persist()
@typecheck(test=enumeration('wald', 'lrt', 'score'),
y=expr_float64,
x=expr_float64,
covariates=sequenceof(expr_float64),
pass_through=sequenceof(oneof(str, Expression)))
def poisson_regression_rows(test, y, x, covariates, pass_through=()) -> Table:
r"""For each row, test an input variable for association with a
count response variable using `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__.
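    Examples
    --------
    A minimal sketch (the phenotype field `pheno.n_claims` below is hypothetical;
    substitute any column-indexed non-negative integer response):
    >>> result_ht = hl.poisson_regression_rows(      # doctest: +SKIP
    ...     test='wald',
    ...     y=dataset.pheno.n_claims,
    ...     x=dataset.GT.n_alt_alleles(),
    ...     covariates=[1, dataset.pheno.age, dataset.pheno.is_female])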
Notes
-----
See :func:`.logistic_regression_rows` for more info on statistical tests
of general linear models.
Note
----
Use the `pass_through` parameter to include additional row fields from
matrix table underlying ``x``. For example, to include an "rsid" field, set
``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.
Parameters
----------
y : :class:`.Float64Expression`
Column-indexed response expression.
All non-missing values must evaluate to a non-negative integer.
x : :class:`.Float64Expression`
Entry-indexed expression for input variable.
covariates : :obj:`list` of :class:`.Float64Expression`
Non-empty list of column-indexed covariate expressions.
pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
Additional row fields to include in the resulting table.
Returns
-------
:class:`.Table`
"""
if len(covariates) == 0:
raise ValueError('Poisson regression requires at least one covariate expression')
mt = matrix_table_source('poisson_regression_rows/x', x)
check_entry_indexed('poisson_regression_rows/x', x)
analyze('poisson_regression_rows/y', y, mt._col_indices)
all_exprs = [y]
for e in covariates:
all_exprs.append(e)
analyze('poisson_regression_rows/covariates', e, mt._col_indices)
_warn_if_no_intercept('poisson_regression_rows', covariates)
x_field_name = Env.get_uid()
y_field_name = '__y'
cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
row_fields = _get_regression_row_fields(mt, pass_through, 'poisson_regression_rows')
# FIXME: selecting an existing entry field should be emitted as a SelectFields
mt = mt._select_all(col_exprs=dict(**{y_field_name: y},
**dict(zip(cov_field_names, covariates))),
row_exprs=row_fields,
col_key=[],
entry_exprs={x_field_name: x})
config = {
'name': 'PoissonRegression',
'test': test,
'yField': y_field_name,
'xField': x_field_name,
'covFields': cov_field_names,
'passThrough': [x for x in row_fields if x not in mt.row_key]
}
return Table(ir.MatrixToTableApply(mt._mir, config)).persist()
@typecheck(y=expr_float64,
x=sequenceof(expr_float64),
z_t=nullable(expr_float64),
k=nullable(np.ndarray),
p_path=nullable(str),
overwrite=bool,
standardize=bool,
mean_impute=bool)
def linear_mixed_model(y,
x,
z_t=None,
k=None,
p_path=None,
overwrite=False,
standardize=True,
mean_impute=True):
r"""Initialize a linear mixed model from a matrix table.
Examples
--------
Initialize a model using three fixed effects (including intercept) and
genetic marker random effects:
>>> marker_ds = dataset.filter_rows(dataset.use_as_marker) # doctest: +SKIP
>>> model, _ = hl.linear_mixed_model( # doctest: +SKIP
... y=marker_ds.pheno.height,
... x=[1, marker_ds.pheno.age, marker_ds.pheno.is_female],
... z_t=marker_ds.GT.n_alt_alleles(),
... p_path='output/p.bm')
Fit the model and examine :math:`h^2`:
>>> model.fit() # doctest: +SKIP
>>> model.h_sq # doctest: +SKIP
Sanity-check the normalized likelihood of :math:`h^2` over the percentile
grid:
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> plt.plot(range(101), model.h_sq_normalized_lkhd()) # doctest: +SKIP
For this value of :math:`h^2`, test each variant for association:
>>> result_table = hl.linear_mixed_regression_rows(dataset.GT.n_alt_alleles(), model) # doctest: +SKIP
Alternatively, one can define a full-rank model using a pre-computed kinship
matrix :math:`K` in ndarray form. When :math:`K` is the realized
relationship matrix defined by the genetic markers, we obtain the same model
as above with :math:`P` written as a block matrix but returned as an
ndarray:
>>> rrm = hl.realized_relationship_matrix(marker_ds.GT).to_numpy() # doctest: +SKIP
>>> model, p = hl.linear_mixed_model( # doctest: +SKIP
... y=dataset.pheno.height,
... x=[1, dataset.pheno.age, dataset.pheno.is_female],
... k=rrm,
... p_path='output/p.bm',
... overwrite=True)
Notes
-----
See :class:`.LinearMixedModel` for details on the model and notation.
Exactly one of `z_t` and `k` must be set.
If `z_t` is set, the model is low-rank if the number of samples :math:`n` exceeds
the number of random effects :math:`m`. At least one dimension must be less
than or equal to 46300. If `standardize` is true, each random effect is first
standardized to have mean 0 and variance :math:`\frac{1}{m}`, so that the
diagonal values of the kinship matrix :math:`K = ZZ^T` are 1.0 in
expectation. This kinship matrix corresponds to the
:meth:`realized_relationship_matrix` in genetics. See
:meth:`.LinearMixedModel.from_random_effects` and :meth:`.BlockMatrix.svd`
for more details.
If `k` is set, the model is full-rank. For correct results, the indices of
`k` **must be aligned** with columns of the source of `y`.
Set `p_path` if you plan to use the model in :func:`.linear_mixed_regression_rows`.
`k` must be positive semi-definite; symmetry is not checked as only the
lower triangle is used. See :meth:`.LinearMixedModel.from_kinship` for more
details.
Missing, nan, or infinite values in `y` or `x` will raise an error.
If set, `z_t` may only have missing values if `mean_impute` is true, in
    which case missing values are set to the row mean. We recommend setting
`mean_impute` to false if you expect no missing values, both for performance
and as a sanity check.
Warning
-------
If the rows of the matrix table have been filtered to a small fraction,
    then consider :meth:`.MatrixTable.repartition` before this method to improve
performance.
Parameters
----------
y: :class:`.Float64Expression`
Column-indexed expression for the observations (rows of :math:`y`).
Must have no missing values.
x: :obj:`list` of :class:`.Float64Expression`
Non-empty list of column-indexed expressions for the fixed effects (rows of :math:`X`).
Each expression must have the same source as `y` or no source
(e.g., the intercept ``1.0``).
Must have no missing values.
z_t: :class:`.Float64Expression`, optional
Entry-indexed expression for each mixed effect. These values are
row-standardized to variance :math:`1 / m` to form the entries of
:math:`Z^T`. If `mean_impute` is false, must have no missing values.
Exactly one of `z_t` and `k` must be set.
k: :class:`numpy.ndarray`, optional
Kinship matrix :math:`K`.
Exactly one of `z_t` and `k` must be set.
p_path: :class:`str`, optional
Path at which to write the projection :math:`P` as a block matrix.
Required if `z_t` is set.
overwrite: :obj:`bool`
If ``True``, overwrite an existing file at `p_path`.
standardize: :obj:`bool`
If ``True``, standardize `z_t` by row to mean 0 and variance
:math:`\frac{1}{m}`.
mean_impute: :obj:`bool`
If ``True``, mean-impute missing values of `z_t` by row.
Returns
-------
model: :class:`.LinearMixedModel`
Linear mixed model ready to be fit.
p: :class:`numpy.ndarray` or :class:`.BlockMatrix`
Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.
The type is block matrix if the model is low rank (i.e., if `z_t` is set
and :math:`n > m`).
"""
source = matrix_table_source('linear_mixed_model/y', y)
if ((z_t is None and k is None)
or (z_t is not None and k is not None)):
raise ValueError("linear_mixed_model: set exactly one of 'z_t' and 'k'")
if len(x) == 0:
raise ValueError("linear_mixed_model: 'x' must include at least one fixed effect")
_warn_if_no_intercept('linear_mixed_model', x)
# collect x and y in one pass
mt = source.select_cols(xy=hl.array(x + [y])).key_cols_by()
xy = np.array(mt.xy.collect(), dtype=np.float64)
xy = xy.reshape(xy.size // (len(x) + 1), len(x) + 1)
x_nd = np.copy(xy[:, :-1])
y_nd = np.copy(xy[:, -1])
n = y_nd.size
del xy
if not np.all(np.isfinite(y_nd)):
raise ValueError("linear_mixed_model: 'y' has missing, nan, or infinite values")
if not np.all(np.isfinite(x_nd)):
raise ValueError("linear_mixed_model: 'x' has missing, nan, or infinite values")
if z_t is None:
model, p = LinearMixedModel.from_kinship(y_nd, x_nd, k, p_path, overwrite)
else:
check_entry_indexed('from_matrix_table: z_t', z_t)
if matrix_table_source('linear_mixed_model/z_t', z_t) != source:
raise ValueError("linear_mixed_model: 'y' and 'z_t' must "
"have the same source")
z_bm = BlockMatrix.from_entry_expr(z_t,
mean_impute=mean_impute,
center=standardize,
normalize=standardize).T # variance is 1 / n
m = z_bm.shape[1]
model, p = LinearMixedModel.from_random_effects(y_nd, x_nd, z_bm, p_path, overwrite)
if standardize:
model.s = model.s * (n / m) # now variance is 1 / m
if model.low_rank and isinstance(p, np.ndarray):
assert n > m
p = BlockMatrix.read(p_path)
return model, p
@typecheck(entry_expr=expr_float64,
model=LinearMixedModel,
pa_t_path=nullable(str),
a_t_path=nullable(str),
mean_impute=bool,
partition_size=nullable(int),
pass_through=sequenceof(oneof(str, Expression)))
def linear_mixed_regression_rows(entry_expr,
model,
pa_t_path=None,
a_t_path=None,
mean_impute=True,
partition_size=None,
pass_through=()):
"""For each row, test an input variable for association using a linear
mixed model.
Examples
--------
See the example in :meth:`linear_mixed_model` and section below on
efficiently testing multiple responses or sets of fixed effects.
Notes
-----
See :class:`.LinearMixedModel` for details on the model and notation.
This method packages up several steps for convenience:
1. Read the transformation :math:`P` from ``model.p_path``.
2. Write `entry_expr` at `a_t_path` as the block matrix :math:`A^T` with
    the same block size as :math:`P`. The parallelism is ``n_rows / block_size``.
3. Multiply and write :math:`A^T P^T` at `pa_t_path`. The parallelism is the
number of blocks in :math:`(PA)^T`, which equals
``(n_rows / block_size) * (model.r / block_size)``.
4. Compute regression results per row with
:meth:`.LinearMixedModel.fit_alternatives`.
The parallelism is ``n_rows / partition_size``.
If `pa_t_path` and `a_t_path` are not set, temporary files are used.
`entry_expr` may only have missing values if `mean_impute` is true, in
    which case missing values are set to the row mean. We recommend setting
`mean_impute` to false if you expect no missing values, both for performance
and as a sanity check.
**Efficiently varying the response or set of fixed effects**
Computing :math:`K`, :math:`P`, :math:`S`, :math:`A^T`, and especially the
product :math:`(PA)^T` may require significant compute when :math:`n` and/or
:math:`m` is large. However these quantities are all independent of the
response :math:`y` or fixed effects :math:`X`! And with the model
diagonalized, Step 4 above is fast and scalable.
So having run linear mixed regression once, we can
compute :math:`h^2` and regression statistics for another response or set of
    fixed effects on the **same samples** at roughly the speed of
:func:`.linear_regression_rows`.
For example, having collected another `y` and `x` as ndarrays, one can
construct a new linear mixed model directly.
Supposing the model is full-rank and `p` is an ndarray:
>>> model = hl.stats.LinearMixedModel(p @ y, p @ x, s) # doctest: +SKIP
>>> model.fit() # doctest: +SKIP
>>> result_ht = model.fit_alternatives(pa_t_path) # doctest: +SKIP
Supposing the model is low-rank and `p` is a block matrix:
>>> p = BlockMatrix.read(p_path) # doctest: +SKIP
>>> py, px = (p @ y).to_numpy(), (p @ x).to_numpy() # doctest: +SKIP
>>> model = LinearMixedModel(py, px, s, y, x) # doctest: +SKIP
>>> model.fit() # doctest: +SKIP
>>> result_ht = model.fit_alternatives(pa_t_path, a_t_path) # doctest: +SKIP
In either case, one can easily loop through many responses or conditional
analyses. To join results back to the matrix table:
>>> dataset = dataset.add_row_index() # doctest: +SKIP
    >>> dataset = dataset.annotate_rows(lmmreg=result_ht[dataset.row_idx])  # doctest: +SKIP
Warning
-------
For correct results, the column-index of `entry_expr` must correspond to the
sample index of the model. This will be true, for example, if `model`
was created with :func:`.linear_mixed_model` using (a possibly row-filtered
version of) the source of `entry_expr`, or if `y` and `x` were collected to
arrays from this source. Hail will raise an error if the number of columns
does not match ``model.n``, but will not detect, for example, permuted
samples.
The warning on :meth:`.BlockMatrix.write_from_entry_expr` applies to this
method when the number of samples is large.
Note
----
Use the `pass_through` parameter to include additional row fields from
matrix table underlying ``entry_expr``. For example, to include an "rsid"
    field, set ``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.
Parameters
----------
entry_expr: :class:`.Float64Expression`
Entry-indexed expression for input variable.
If mean_impute is false, must have no missing values.
model: :class:`.LinearMixedModel`
        Fit linear mixed model with ``p_path`` set.
pa_t_path: :class:`str`, optional
Path at which to store the transpose of :math:`PA`.
If not set, a temporary file is used.
a_t_path: :class:`str`, optional
Path at which to store the transpose of :math:`A`.
If not set, a temporary file is used.
mean_impute: :obj:`bool`
Mean-impute missing values of `entry_expr` by row.
partition_size: :obj:`int`
Number of rows to process per partition.
Default given by block size of :math:`P`.
pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
Additional row fields to include in the resulting table.
Returns
-------
:class:`.Table`
"""
mt = matrix_table_source('linear_mixed_regression_rows', entry_expr)
n = mt.count_cols()
check_entry_indexed('linear_mixed_regression_rows', entry_expr)
if not model._fitted:
raise ValueError("linear_mixed_regression_rows: 'model' has not been fit "
"using 'fit()'")
if model.p_path is None:
raise ValueError("linear_mixed_regression_rows: 'model' property 'p_path' "
"was not set at initialization")
if model.n != n:
raise ValueError(f"linear_mixed_regression_rows: linear mixed model expects {model.n} samples, "
f"\n but 'entry_expr' source has {n} columns.")
pa_t_path = new_temp_file() if pa_t_path is None else pa_t_path
a_t_path = new_temp_file() if a_t_path is None else a_t_path
p = BlockMatrix.read(model.p_path)
BlockMatrix.write_from_entry_expr(entry_expr,
a_t_path,
mean_impute=mean_impute,
block_size=p.block_size)
a_t = BlockMatrix.read(a_t_path)
(a_t @ p.T).write(pa_t_path, force_row_major=True)
ht = model.fit_alternatives(pa_t_path,
a_t_path if model.low_rank else None,
partition_size)
row_fields = _get_regression_row_fields(mt, pass_through, 'linear_mixed_regression_rows')
mt_keys = mt.select_rows(**row_fields).add_row_index('__row_idx').rows().add_index('__row_idx').key_by('__row_idx')
return mt_keys.annotate(**ht[mt_keys['__row_idx']]).key_by(*mt.row_key).drop('__row_idx')
@typecheck(key_expr=expr_any,
weight_expr=expr_float64,
y=expr_float64,
x=expr_float64,
covariates=sequenceof(expr_float64),
logistic=bool,
max_size=int,
accuracy=numeric,
iterations=int)
def skat(key_expr, weight_expr, y, x, covariates, logistic=False,
max_size=46340, accuracy=1e-6, iterations=10000) -> Table:
r"""Test each keyed group of rows for association by linear or logistic
SKAT test.
Examples
--------
Test each gene for association using the linear sequence kernel association
test:
>>> skat_table = hl.skat(key_expr=burden_ds.gene,
... weight_expr=burden_ds.weight,
... y=burden_ds.burden.pheno,
... x=burden_ds.GT.n_alt_alleles(),
... covariates=[1, burden_ds.burden.cov1, burden_ds.burden.cov2])
.. caution::
By default, the Davies algorithm iterates up to 10k times until an
accuracy of 1e-6 is achieved. Hence a reported p-value of zero with no
issues may truly be as large as 1e-6. The accuracy and maximum number of
iterations may be controlled by the corresponding function parameters.
In general, higher accuracy requires more iterations.
.. caution::
To process a group with :math:`m` rows, several copies of an
:math:`m \times m` matrix of doubles must fit in worker memory. Groups
with tens of thousands of rows may exhaust worker memory causing the
entire job to fail. In this case, use the `max_size` parameter to skip
groups larger than `max_size`.
Warning
-------
:func:`.skat` considers the same set of columns (i.e., samples, points) for
every group, namely those columns for which **all** covariates are defined.
For each row, missing values of `x` are mean-imputed over these columns.
As in the example, the intercept covariate ``1`` must be included
**explicitly** if desired.
Notes
-----
This method provides a scalable implementation of the score-based
variance-component test originally described in
`Rare-Variant Association Testing for Sequencing Data with the Sequence Kernel Association Test
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3135811/>`__.
Row weights must be non-negative. Rows with missing weights are ignored. In
the R package ``skat``---which assumes rows are variants---default weights
are given by evaluating the Beta(1, 25) density at the minor allele
frequency. To replicate these weights in Hail using alternate allele
frequencies stored in a row-indexed field `AF`, one can use the expression:
>>> hl.dbeta(hl.min(ds2.AF), 1.0, 25.0) ** 2
In the logistic case, the response `y` must either be numeric (with all
present values 0 or 1) or Boolean, in which case true and false are coded
as 1 and 0, respectively.
    The resulting :class:`.Table` provides the group's key (`id`), the number of
rows in the group (`size`), the variance component score `q_stat`, the SKAT
`p-value`, and a `fault` flag. For the toy example above, the table has the
form:
+-------+------+--------+---------+-------+
| id | size | q_stat | p_value | fault |
+=======+======+========+=========+=======+
| geneA | 2 | 4.136 | 0.205 | 0 |
+-------+------+--------+---------+-------+
| geneB | 1 | 5.659 | 0.195 | 0 |
+-------+------+--------+---------+-------+
| geneC | 3 | 4.122 | 0.192 | 0 |
+-------+------+--------+---------+-------+
Groups larger than `max_size` appear with missing `q_stat`, `p_value`, and
`fault`. The hard limit on the number of rows in a group is 46340.
Note that the variance component score `q_stat` agrees with ``Q`` in the R
package ``skat``, but both differ from :math:`Q` in the paper by the factor
:math:`\frac{1}{2\sigma^2}` in the linear case and :math:`\frac{1}{2}` in
the logistic case, where :math:`\sigma^2` is the unbiased estimator of
residual variance for the linear null model. The R package also applies a
"small-sample adjustment" to the null distribution in the logistic case
when the sample size is less than 2000. Hail does not apply this
adjustment.
The fault flag is an integer indicating whether any issues occurred when
running the Davies algorithm to compute the p-value as the right tail of a
weighted sum of :math:`\chi^2(1)` distributions.
    +-------------+-----------------------------------------+
    | fault value | Description                             |
    +=============+=========================================+
    | 0           | no issues                               |
    +-------------+-----------------------------------------+
    | 1           | accuracy NOT achieved                   |
    +-------------+-----------------------------------------+
    | 2           | round-off error possibly significant    |
    +-------------+-----------------------------------------+
    | 3           | invalid parameters                      |
    +-------------+-----------------------------------------+
    | 4           | unable to locate integration parameters |
    +-------------+-----------------------------------------+
    | 5           | out of memory                           |
    +-------------+-----------------------------------------+
Parameters
----------
key_expr : :class:`.Expression`
Row-indexed expression for key associated to each row.
weight_expr : :class:`.Float64Expression`
Row-indexed expression for row weights.
y : :class:`.Float64Expression`
Column-indexed response expression.
If `logistic` is ``True``, all non-missing values must evaluate to 0 or
1. Note that a :class:`.BooleanExpression` will be implicitly converted
to a :class:`.Float64Expression` with this property.
x : :class:`.Float64Expression`
Entry-indexed expression for input variable.
covariates : :obj:`list` of :class:`.Float64Expression`
List of column-indexed covariate expressions.
logistic : :obj:`bool`
If true, use the logistic test rather than the linear test.
max_size : :obj:`int`
Maximum size of group on which to run the test.
accuracy : :obj:`float`
Accuracy achieved by the Davies algorithm if fault value is zero.
iterations : :obj:`int`
Maximum number of iterations attempted by the Davies algorithm.
Returns
-------
:class:`.Table`
Table of SKAT results.
"""
mt = matrix_table_source('skat/x', x)
check_entry_indexed('skat/x', x)
analyze('skat/key_expr', key_expr, mt._row_indices)
analyze('skat/weight_expr', weight_expr, mt._row_indices)
analyze('skat/y', y, mt._col_indices)
all_exprs = [key_expr, weight_expr, y]
for e in covariates:
all_exprs.append(e)
analyze('skat/covariates', e, mt._col_indices)
_warn_if_no_intercept('skat', covariates)
# FIXME: remove this logic when annotation is better optimized
if x in mt._fields_inverse:
x_field_name = mt._fields_inverse[x]
entry_expr = {}
else:
x_field_name = Env.get_uid()
entry_expr = {x_field_name: x}
y_field_name = '__y'
weight_field_name = '__weight'
key_field_name = '__key'
cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
mt = mt._select_all(col_exprs=dict(**{y_field_name: y},
**dict(zip(cov_field_names, covariates))),
row_exprs={weight_field_name: weight_expr,
key_field_name: key_expr},
entry_exprs=entry_expr)
config = {
'name': 'Skat',
'keyField': key_field_name,
'weightField': weight_field_name,
'xField': x_field_name,
'yField': y_field_name,
'covFields': cov_field_names,
'logistic': logistic,
'maxSize': max_size,
'accuracy': accuracy,
'iterations': iterations
}
return Table(ir.MatrixToTableApply(mt._mir, config))
@typecheck(p_value=expr_numeric,
approximate=bool)
def lambda_gc(p_value, approximate=True):
"""
Compute genomic inflation factor (lambda GC) from an Expression of p-values.
.. include:: ../_templates/experimental.rst
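    Examples
    --------
    A minimal sketch, assuming `result_ht` is a table with a row-indexed
    `p_value` field (for example, the output of :func:`.linear_regression_rows`):
    >>> lgc = hl.lambda_gc(result_ht.p_value)  # doctest: +SKIP
    Values well above 1 suggest inflation of the test statistics.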
Parameters
----------
p_value : :class:`.NumericExpression`
Row-indexed numeric expression of p-values.
approximate : :obj:`bool`
If False, computes exact lambda GC (slower and uses more memory).
Returns
-------
:obj:`float`
Genomic inflation factor (lambda genomic control).
"""
check_row_indexed('lambda_gc', p_value)
t = table_source('lambda_gc', p_value)
med_chisq = _lambda_gc_agg(p_value, approximate)
return t.aggregate(med_chisq)
@typecheck(p_value=expr_numeric,
approximate=bool)
def _lambda_gc_agg(p_value, approximate=True):
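    # Convert each p-value to the corresponding chi-squared(1) statistic, take the median
    # (approximate or exact), and divide by the expected median of a chi-squared(1) distribution.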
chisq = hl.qchisqtail(p_value, 1)
if approximate:
med_chisq = hl.agg.approx_quantiles(chisq, 0.5)
else:
med_chisq = hl.median(hl.agg.collect(chisq))
return med_chisq / hl.qchisqtail(0.5, 1)
@typecheck(ds=oneof(Table, MatrixTable),
keep_star=bool,
left_aligned=bool,
permit_shuffle=bool)
def split_multi(ds, keep_star=False, left_aligned=False, *, permit_shuffle=False):
"""Split multiallelic variants.
Warning
-------
In order to support a wide variety of data types, this function splits only
the variants on a :class:`.MatrixTable`, but **not the genotypes**. Use
:func:`.split_multi_hts` if possible, or split the genotypes yourself using
one of the entry modification methods: :meth:`.MatrixTable.annotate_entries`,
:meth:`.MatrixTable.select_entries`, :meth:`.MatrixTable.transmute_entries`.
The resulting dataset will be keyed by the split locus and alleles.
:func:`.split_multi` adds the following fields:
- `was_split` (*bool*) -- ``True`` if this variant was originally
multiallelic, otherwise ``False``.
- `a_index` (*int*) -- The original index of this alternate allele in the
multiallelic representation (NB: 1 is the first alternate allele or the
only alternate allele in a biallelic variant). For example, 1:100:A:T,C
splits into two variants: 1:100:A:T with ``a_index = 1`` and 1:100:A:C
with ``a_index = 2``.
- `old_locus` (*locus*) -- The original, unsplit locus.
- `old_alleles` (*array<str>*) -- The original, unsplit alleles.
All other fields are left unchanged.
Example
-------
:func:`.split_multi_hts`, which splits multiallelic variants for the HTS
genotype schema and updates the entry fields by downcoding the genotype, is
implemented as:
>>> sm = hl.split_multi(ds)
>>> pl = hl.or_missing(
... hl.is_defined(sm.PL),
... (hl.range(0, 3).map(lambda i: hl.min(hl.range(0, hl.len(sm.PL))
... .filter(lambda j: hl.downcode(hl.unphased_diploid_gt_index_call(j), sm.a_index) == hl.unphased_diploid_gt_index_call(i))
... .map(lambda j: sm.PL[j])))))
>>> split_ds = sm.annotate_entries(
... GT=hl.downcode(sm.GT, sm.a_index),
... AD=hl.or_missing(hl.is_defined(sm.AD),
... [hl.sum(sm.AD) - sm.AD[sm.a_index], sm.AD[sm.a_index]]),
... DP=sm.DP,
... PL=pl,
... GQ=hl.gq_from_pl(pl)).drop('old_locus', 'old_alleles')
See Also
--------
:func:`.split_multi_hts`
Parameters
----------
ds : :class:`.MatrixTable` or :class:`.Table`
An unsplit dataset.
keep_star : :obj:`bool`
Do not filter out * alleles.
left_aligned : :obj:`bool`
If ``True``, variants are assumed to be left aligned and have unique
loci. This avoids a shuffle. If the assumption is violated, an error
is generated.
permit_shuffle : :obj:`bool`
If ``True``, permit a data shuffle to sort out-of-order split results.
This will only be required if input data has duplicate loci, one of
which contains more than one alternate allele.
Returns
-------
:class:`.MatrixTable` or :class:`.Table`
"""
require_row_key_variant(ds, "split_multi")
new_id = Env.get_uid()
is_table = isinstance(ds, Table)
old_row = ds.row if is_table else ds._rvrow
kept_alleles = hl.range(1, hl.len(old_row.alleles))
if not keep_star:
kept_alleles = kept_alleles.filter(lambda i: old_row.alleles[i] != "*")
def new_struct(variant, i):
return hl.struct(alleles=variant.alleles,
locus=variant.locus,
a_index=i,
was_split=hl.len(old_row.alleles) > 2)
def split_rows(expr, rekey):
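        # Explode the sorted array of split variants into one row per alternate allele and
        # rewrite the row key to the split locus/alleles (avoiding a shuffle when possible).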
if isinstance(ds, MatrixTable):
mt = (ds.annotate_rows(**{new_id: expr})
.explode_rows(new_id))
if rekey:
mt = mt.key_rows_by()
else:
mt = mt.key_rows_by('locus')
new_row_expr = mt._rvrow.annotate(locus=mt[new_id]['locus'],
alleles=mt[new_id]['alleles'],
a_index=mt[new_id]['a_index'],
was_split=mt[new_id]['was_split'],
old_locus=mt.locus,
old_alleles=mt.alleles).drop(new_id)
mt = mt._select_rows('split_multi', new_row_expr)
if rekey:
return mt.key_rows_by('locus', 'alleles')
else:
return MatrixTable(ir.MatrixKeyRowsBy(mt._mir, ['locus', 'alleles'], is_sorted=True))
else:
assert isinstance(ds, Table)
ht = (ds.annotate(**{new_id: expr})
.explode(new_id))
if rekey:
ht = ht.key_by()
else:
ht = ht.key_by('locus')
new_row_expr = ht.row.annotate(locus=ht[new_id]['locus'],
alleles=ht[new_id]['alleles'],
a_index=ht[new_id]['a_index'],
was_split=ht[new_id]['was_split'],
old_locus=ht.locus,
old_alleles=ht.alleles).drop(new_id)
ht = ht._select('split_multi', new_row_expr)
if rekey:
return ht.key_by('locus', 'alleles')
else:
return Table(ir.TableKeyBy(ht._tir, ['locus', 'alleles'], is_sorted=True))
if left_aligned:
def make_struct(i):
def error_on_moved(v):
return (hl.case()
.when(v.locus == old_row.locus, new_struct(v, i))
.or_error("Found non-left-aligned variant in split_multi"))
return hl.bind(error_on_moved,
hl.min_rep(old_row.locus, [old_row.alleles[0], old_row.alleles[i]]))
return split_rows(hl.sorted(kept_alleles.map(make_struct)), permit_shuffle)
else:
def make_struct(i, cond):
def struct_or_empty(v):
return (hl.case()
.when(cond(v.locus), hl.array([new_struct(v, i)]))
.or_missing())
return hl.bind(struct_or_empty,
hl.min_rep(old_row.locus, [old_row.alleles[0], old_row.alleles[i]]))
def make_array(cond):
return hl.sorted(kept_alleles.flatmap(lambda i: make_struct(i, cond)))
left = split_rows(make_array(lambda locus: locus == ds['locus']), permit_shuffle)
moved = split_rows(make_array(lambda locus: locus != ds['locus']), True)
return left.union(moved) if is_table else left.union_rows(moved, _check_cols=False)
@typecheck(ds=oneof(Table, MatrixTable),
keep_star=bool,
left_aligned=bool,
vep_root=str,
permit_shuffle=bool)
def split_multi_hts(ds, keep_star=False, left_aligned=False, vep_root='vep', *, permit_shuffle=False):
"""Split multiallelic variants for datasets that contain one or more fields
from a standard high-throughput sequencing entry schema.
.. code-block:: text
struct {
GT: call,
AD: array<int32>,
DP: int32,
GQ: int32,
PL: array<int32>,
PGT: call,
PID: str
}
For other entry fields, write your own splitting logic using
:meth:`.MatrixTable.annotate_entries`.
Examples
--------
>>> hl.split_multi_hts(dataset).write('output/split.vds')
Notes
-----
We will explain by example. Consider a hypothetical 3-allelic
variant:
.. code-block:: text
A C,T 0/2:7,2,6:15:45:99,50,99,0,45,99
:func:`.split_multi_hts` will create two biallelic variants (one for each
alternate allele) at the same position
.. code-block:: text
A C 0/0:13,2:15:45:0,45,99
A T 0/1:9,6:15:50:50,0,99
Each multiallelic `GT` or `PGT` field is downcoded once for each alternate allele. A
call for an alternate allele maps to 1 in the biallelic variant
corresponding to itself and 0 otherwise. For example, in the example above,
0/2 maps to 0/0 and 0/1. The genotype 1/2 maps to 0/1 and 0/1.
The biallelic alt `AD` entry is just the multiallelic `AD` entry
corresponding to the alternate allele. The ref AD entry is the sum of the
other multiallelic entries.
The biallelic `DP` is the same as the multiallelic `DP`.
The biallelic `PL` entry for a genotype g is the minimum over `PL` entries
for multiallelic genotypes that downcode to g. For example, the `PL` for (A,
T) at 0/1 is the minimum of the PLs for 0/1 (50) and 1/2 (45), and thus 45.
Fixing an alternate allele and biallelic variant, downcoding gives a map
from multiallelic to biallelic alleles and genotypes. The biallelic `AD` entry
for an allele is just the sum of the multiallelic `AD` entries for alleles
that map to that allele. Similarly, the biallelic `PL` entry for a genotype is
the minimum over multiallelic `PL` entries for genotypes that map to that
genotype.
`GQ` is recomputed from `PL` if `PL` is provided and is not
missing. If not, it is copied from the original GQ.
Here is a second example for a het non-ref
.. code-block:: text
A C,T 1/2:2,8,6:16:45:99,50,99,45,0,99
splits as
.. code-block:: text
A C 0/1:8,8:16:45:45,0,99
A T 0/1:10,6:16:50:50,0,99
**VCF Info Fields**
Hail does not split fields in the info field. This means that if a
multiallelic site with `info.AC` value ``[10, 2]`` is split, each split
site will contain the same array ``[10, 2]``. The provided allele index
field `a_index` can be used to select the value corresponding to the split
allele's position:
>>> split_ds = hl.split_multi_hts(dataset)
>>> split_ds = split_ds.filter_rows(split_ds.info.AC[split_ds.a_index - 1] < 10,
... keep = False)
VCFs split by Hail and exported to new VCFs may be
incompatible with other tools, if action is not taken
first. Since the "Number" of the arrays in split multiallelic
sites no longer matches the structure on import ("A" for 1 per
allele, for example), Hail will export these fields with
number ".".
If the desired output is one value per site, then it is
    possible to use :meth:`.MatrixTable.annotate_rows` to remap these
values. Here is an example:
>>> split_ds = hl.split_multi_hts(dataset)
>>> split_ds = split_ds.annotate_rows(info = hl.struct(AC=split_ds.info.AC[split_ds.a_index - 1],
... **split_ds.info)) # doctest: +SKIP
>>> hl.export_vcf(split_ds, 'output/export.vcf') # doctest: +SKIP
    The info field AC in *output/export.vcf* will have ``Number=1``.
**New Fields**
:func:`.split_multi_hts` adds the following fields:
- `was_split` (*bool*) -- ``True`` if this variant was originally
multiallelic, otherwise ``False``.
- `a_index` (*int*) -- The original index of this alternate allele in the
multiallelic representation (NB: 1 is the first alternate allele or the
only alternate allele in a biallelic variant). For example, 1:100:A:T,C
splits into two variants: 1:100:A:T with ``a_index = 1`` and 1:100:A:C
with ``a_index = 2``.
See Also
--------
:func:`.split_multi`
Parameters
----------
ds : :class:`.MatrixTable` or :class:`.Table`
An unsplit dataset.
keep_star : :obj:`bool`
Do not filter out * alleles.
left_aligned : :obj:`bool`
If ``True``, variants are assumed to be left
aligned and have unique loci. This avoids a shuffle. If the assumption
is violated, an error is generated.
vep_root : :class:`str`
Top-level location of vep data. All variable-length VEP fields
(intergenic_consequences, motif_feature_consequences,
regulatory_feature_consequences, and transcript_consequences)
will be split properly (i.e. a_index corresponding to the VEP allele_num).
permit_shuffle : :obj:`bool`
If ``True``, permit a data shuffle to sort out-of-order split results.
This will only be required if input data has duplicate loci, one of
which contains more than one alternate allele.
Returns
-------
:class:`.MatrixTable` or :class:`.Table`
A biallelic variant dataset.
"""
split = split_multi(ds, keep_star=keep_star, left_aligned=left_aligned, permit_shuffle=permit_shuffle)
row_fields = set(ds.row)
update_rows_expression = {}
if vep_root in row_fields:
update_rows_expression[vep_root] = split[vep_root].annotate(**{
x: split[vep_root][x].filter(lambda csq: csq.allele_num == split.a_index)
for x in ('intergenic_consequences', 'motif_feature_consequences',
'regulatory_feature_consequences', 'transcript_consequences')})
if isinstance(ds, Table):
return split.annotate(**update_rows_expression).drop('old_locus', 'old_alleles')
split = split.annotate_rows(**update_rows_expression)
entry_fields = ds.entry
expected_field_types = {
'GT': hl.tcall,
'AD': hl.tarray(hl.tint),
'DP': hl.tint,
'GQ': hl.tint,
'PL': hl.tarray(hl.tint),
'PGT': hl.tcall,
'PID': hl.tstr
}
bad_fields = []
for field in entry_fields:
if field in expected_field_types and entry_fields[field].dtype != expected_field_types[field]:
bad_fields.append((field, entry_fields[field].dtype, expected_field_types[field]))
if bad_fields:
msg = '\n '.join([f"'{x[0]}'\tfound: {x[1]}\texpected: {x[2]}" for x in bad_fields])
raise TypeError("'split_multi_hts': Found invalid types for the following fields:\n " + msg)
update_entries_expression = {}
if 'GT' in entry_fields:
update_entries_expression['GT'] = hl.downcode(split.GT, split.a_index)
if 'DP' in entry_fields:
update_entries_expression['DP'] = split.DP
if 'AD' in entry_fields:
update_entries_expression['AD'] = hl.or_missing(hl.is_defined(split.AD),
[hl.sum(split.AD) - split.AD[split.a_index], split.AD[split.a_index]])
if 'PL' in entry_fields:
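        # Biallelic PL for genotype i is the minimum multiallelic PL over all genotypes j
        # that downcode to i (as described in the docstring above).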
pl = hl.or_missing(
hl.is_defined(split.PL),
(hl.range(0, 3).map(lambda i:
hl.min((hl.range(0, hl.triangle(split.old_alleles.length()))
.filter(lambda j: hl.downcode(hl.unphased_diploid_gt_index_call(j),
split.a_index).unphased_diploid_gt_index() == i
).map(lambda j: split.PL[j]))))))
if 'GQ' in entry_fields:
update_entries_expression['PL'] = pl
update_entries_expression['GQ'] = hl.or_else(hl.gq_from_pl(pl), split.GQ)
else:
update_entries_expression['PL'] = pl
else:
if 'GQ' in entry_fields:
update_entries_expression['GQ'] = split.GQ
if 'PGT' in entry_fields:
update_entries_expression['PGT'] = hl.downcode(split.PGT, split.a_index)
if 'PID' in entry_fields:
update_entries_expression['PID'] = split.PID
return split.annotate_entries(**update_entries_expression).drop('old_locus', 'old_alleles')
@typecheck(call_expr=expr_call)
def genetic_relatedness_matrix(call_expr) -> BlockMatrix:
r"""Compute the genetic relatedness matrix (GRM).
Examples
--------
>>> grm = hl.genetic_relatedness_matrix(dataset.GT)
Notes
-----
The genetic relationship matrix (GRM) :math:`G` encodes genetic correlation
between each pair of samples. It is defined by :math:`G = MM^T` where
:math:`M` is a standardized version of the genotype matrix, computed as
follows. Let :math:`C` be the :math:`n \times m` matrix of raw genotypes
in the variant dataset, with rows indexed by :math:`n` samples and columns
    indexed by :math:`m` biallelic autosomal variants; :math:`C_{ij}` is the
number of alternate alleles of variant :math:`j` carried by sample
:math:`i`, which can be 0, 1, 2, or missing. For each variant :math:`j`,
the sample alternate allele frequency :math:`p_j` is computed as half the
mean of the non-missing entries of column :math:`j`. Entries of :math:`M`
are then mean-centered and variance-normalized as
.. math::
M_{ij} = \frac{C_{ij}-2p_j}{\sqrt{2p_j(1-p_j)m}},
with :math:`M_{ij} = 0` for :math:`C_{ij}` missing (i.e. mean genotype
imputation). This scaling normalizes genotype variances to a common value
:math:`1/m` for variants in Hardy-Weinberg equilibrium and is further
motivated in the paper `Patterson, Price and Reich, 2006
<http://journals.plos.org/plosgenetics/article?id=10.1371/journal.pgen.0020190>`__.
(The resulting amplification of signal from the low end of the allele
frequency spectrum will also introduce noise for rare variants; common
practice is to filter out variants with minor allele frequency below some
cutoff.) The factor :math:`1/m` gives each sample row approximately unit
total variance (assuming linkage equilibrium) so that the diagonal entries
of the GRM are approximately 1. Equivalently,
.. math::
G_{ik} = \frac{1}{m} \sum_{j=1}^m \frac{(C_{ij}-2p_j)(C_{kj}-2p_j)}{2 p_j (1-p_j)}
This method drops variants with :math:`p_j = 0` or :math:`p_j = 1` before
computing kinship.
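    As a hypothetical follow-up (assuming the dataset is small enough to localize),
    the GRM can be collected to a NumPy array and its diagonal inspected:
    >>> grm_nd = grm.to_numpy()          # doctest: +SKIP
    >>> grm_nd.diagonal().mean()         # doctest: +SKIP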
Parameters
----------
call_expr : :class:`.CallExpression`
Entry-indexed call expression with columns corresponding
to samples.
Returns
-------
:class:`.BlockMatrix`
Genetic relatedness matrix for all samples. Row and column indices
correspond to matrix table column index.
"""
mt = matrix_table_source('genetic_relatedness_matrix/call_expr', call_expr)
check_entry_indexed('genetic_relatedness_matrix/call_expr', call_expr)
mt = mt.select_entries(__gt=call_expr.n_alt_alleles()).unfilter_entries()
mt = mt.select_rows(__AC=agg.sum(mt.__gt),
__n_called=agg.count_where(hl.is_defined(mt.__gt)))
mt = mt.filter_rows((mt.__AC > 0) & (mt.__AC < 2 * mt.__n_called))
mt = mt.select_rows(__mean_gt=mt.__AC / mt.__n_called)
mt = mt.annotate_rows(__hwe_scaled_std_dev=hl.sqrt(mt.__mean_gt * (2 - mt.__mean_gt)))
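    # Mean-center and scale by the expected standard deviation under HWE; missing
    # genotypes map to 0.0, i.e. mean imputation after centering.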
normalized_gt = hl.or_else((mt.__gt - mt.__mean_gt) / mt.__hwe_scaled_std_dev, 0.0)
bm = BlockMatrix.from_entry_expr(normalized_gt)
return (bm.T @ bm) / (bm.n_rows / 2.0)
@typecheck(call_expr=expr_call)
def realized_relationship_matrix(call_expr) -> BlockMatrix:
r"""Computes the realized relationship matrix (RRM).
Examples
--------
>>> rrm = hl.realized_relationship_matrix(dataset.GT)
Notes
-----
The realized relationship matrix (RRM) is defined as follows. Consider the
:math:`n \times m` matrix :math:`C` of raw genotypes, with rows indexed by
    :math:`n` samples and columns indexed by the :math:`m` biallelic autosomal
variants; :math:`C_{ij}` is the number of alternate alleles of variant
:math:`j` carried by sample :math:`i`, which can be 0, 1, 2, or missing. For
each variant :math:`j`, the sample alternate allele frequency :math:`p_j` is
computed as half the mean of the non-missing entries of column :math:`j`.
Entries of :math:`M` are then mean-centered and variance-normalized as
.. math::
        M_{ij} =
        \frac{C_{ij}-2p_j}
        {\sqrt{\frac{m}{n} \sum_{k=1}^n (C_{kj}-2p_j)^2}},
with :math:`M_{ij} = 0` for :math:`C_{ij}` missing (i.e. mean genotype
imputation). This scaling normalizes each variant column to have empirical
variance :math:`1/m`, which gives each sample row approximately unit total
variance (assuming linkage equilibrium) and yields the :math:`n \times n`
sample correlation or realized relationship matrix (RRM) :math:`K` as simply
.. math::
K = MM^T
Note that the only difference between the realized relationship matrix and
the genetic relatedness matrix (GRM) used in
    :func:`.genetic_relatedness_matrix` is the variant (column) normalization:
where RRM uses empirical variance, GRM uses expected variance under
Hardy-Weinberg Equilibrium.
This method drops variants with zero variance before computing kinship.
Parameters
----------
call_expr : :class:`.CallExpression`
Entry-indexed call expression on matrix table with columns corresponding
to samples.
Returns
-------
:class:`.BlockMatrix`
Realized relationship matrix for all samples. Row and column indices
correspond to matrix table column index.
"""
mt = matrix_table_source('realized_relationship_matrix/call_expr', call_expr)
check_entry_indexed('realized_relationship_matrix/call_expr', call_expr)
mt = mt.select_entries(__gt=call_expr.n_alt_alleles()).unfilter_entries()
mt = mt.select_rows(__AC=agg.sum(mt.__gt),
__ACsq=agg.sum(mt.__gt * mt.__gt),
__n_called=agg.count_where(hl.is_defined(mt.__gt)))
mt = mt.select_rows(__mean_gt=mt.__AC / mt.__n_called,
__centered_length=hl.sqrt(mt.__ACsq - (mt.__AC ** 2) / mt.__n_called))
mt = mt.filter_rows(mt.__centered_length > 0.1) # truly non-zero values are at least sqrt(0.5)
normalized_gt = hl.or_else((mt.__gt - mt.__mean_gt) / mt.__centered_length, 0.0)
bm = BlockMatrix.from_entry_expr(normalized_gt)
return (bm.T @ bm) / (bm.n_rows / bm.n_cols)
@typecheck(entry_expr=expr_float64, block_size=nullable(int))
def row_correlation(entry_expr, block_size=None) -> BlockMatrix:
"""Computes the correlation matrix between row vectors.
Examples
--------
Consider the following dataset with three variants and four samples:
>>> data = [{'v': '1:1:A:C', 's': 'a', 'GT': hl.Call([0, 0])},
... {'v': '1:1:A:C', 's': 'b', 'GT': hl.Call([0, 0])},
... {'v': '1:1:A:C', 's': 'c', 'GT': hl.Call([0, 1])},
... {'v': '1:1:A:C', 's': 'd', 'GT': hl.Call([1, 1])},
... {'v': '1:2:G:T', 's': 'a', 'GT': hl.Call([0, 1])},
... {'v': '1:2:G:T', 's': 'b', 'GT': hl.Call([1, 1])},
... {'v': '1:2:G:T', 's': 'c', 'GT': hl.Call([0, 1])},
... {'v': '1:2:G:T', 's': 'd', 'GT': hl.Call([0, 0])},
... {'v': '1:3:C:G', 's': 'a', 'GT': hl.Call([0, 1])},
... {'v': '1:3:C:G', 's': 'b', 'GT': hl.Call([0, 0])},
... {'v': '1:3:C:G', 's': 'c', 'GT': hl.Call([1, 1])},
... {'v': '1:3:C:G', 's': 'd', 'GT': hl.null(hl.tcall)}]
>>> ht = hl.Table.parallelize(data, hl.dtype('struct{v: str, s: str, GT: call}'))
>>> mt = ht.to_matrix_table(row_key=['v'], col_key=['s'])
Compute genotype correlation between all pairs of variants:
>>> ld = hl.row_correlation(mt.GT.n_alt_alleles())
>>> ld.to_numpy()
array([[ 1. , -0.85280287, 0.42640143],
[-0.85280287, 1. , -0.5 ],
[ 0.42640143, -0.5 , 1. ]])
Compute genotype correlation between consecutively-indexed variants:
>>> ld.sparsify_band(lower=0, upper=1).to_numpy()
array([[ 1. , -0.85280287, 0. ],
[ 0. , 1. , -0.5 ],
[ 0. , 0. , 1. ]])
Warning
-------
    Rows with a constant value (i.e., zero variance) will result in ``nan``
correlation values. To avoid this, first check that all rows vary or filter
out constant rows (for example, with the help of :func:`.aggregators.stats`).
Notes
-----
In this method, each row of entries is regarded as a vector with elements
defined by `entry_expr` and missing values mean-imputed per row.
The ``(i, j)`` element of the resulting block matrix is the correlation
between rows ``i`` and ``j`` (as 0-indexed by order in the matrix table;
see :meth:`~hail.MatrixTable.add_row_index`).
The correlation of two vectors is defined as the
    `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`__
between the corresponding empirical distributions of elements,
or equivalently as the cosine of the angle between the vectors.
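
    Concretely, the computation amounts to mean-imputing, centering, and
    normalizing each row, then multiplying the result by its transpose; a rough
    NumPy sketch (illustrative only) that reproduces the correlation matrix in
    the first example above:

    .. code-block:: python

        import numpy as np

        X = np.array([[0., 0., 1., 2.],
                      [1., 2., 1., 0.],
                      [1., 0., 2., np.nan]])            # rows = variants, nan = missing
        row_mean = np.nanmean(X, axis=1, keepdims=True)
        X = np.where(np.isnan(X), row_mean, X)          # mean-impute per row
        X = X - X.mean(axis=1, keepdims=True)           # center
        X = X / np.linalg.norm(X, axis=1, keepdims=True)
        corr = X @ X.T                                  # Pearson correlation of rows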
This method has two stages:
- writing the row-normalized block matrix to a temporary file on persistent
disk with :meth:`.BlockMatrix.from_entry_expr`. The parallelism is
``n_rows / block_size``.
- reading and multiplying this block matrix by its transpose. The
parallelism is ``(n_rows / block_size)^2`` if all blocks are computed.
Warning
-------
See all warnings on :meth:`.BlockMatrix.from_entry_expr`. In particular,
for large matrices, it may be preferable to run the two stages separately,
saving the row-normalized block matrix to a file on external storage with
:meth:`.BlockMatrix.write_from_entry_expr`.
The resulting number of matrix elements is the square of the number of rows
in the matrix table, so computing the full matrix may be infeasible. For
example, ten million rows would produce 800TB of float64 values. The
block-sparse representation on BlockMatrix may be used to work efficiently
with regions of such matrices, as in the second example above and
:meth:`ld_matrix`.
To prevent excessive re-computation, be sure to write and read the (possibly
block-sparsified) result before multiplication by another matrix.
Parameters
----------
entry_expr : :class:`.Float64Expression`
Entry-indexed numeric expression on matrix table.
block_size : :obj:`int`, optional
Block size. Default given by :meth:`.BlockMatrix.default_block_size`.
Returns
-------
:class:`.BlockMatrix`
Correlation matrix between row vectors. Row and column indices
correspond to matrix table row index.
"""
bm = BlockMatrix.from_entry_expr(entry_expr, mean_impute=True, center=True, normalize=True, block_size=block_size)
return bm @ bm.T
@typecheck(entry_expr=expr_float64,
locus_expr=expr_locus(),
radius=oneof(int, float),
coord_expr=nullable(expr_float64),
block_size=nullable(int))
def ld_matrix(entry_expr, locus_expr, radius, coord_expr=None, block_size=None) -> BlockMatrix:
"""Computes the windowed correlation (linkage disequilibrium) matrix between
variants.
Examples
--------
Consider the following dataset consisting of three variants with centimorgan
coordinates and four samples:
>>> data = [{'v': '1:1:A:C', 'cm': 0.1, 's': 'a', 'GT': hl.Call([0, 0])},
... {'v': '1:1:A:C', 'cm': 0.1, 's': 'b', 'GT': hl.Call([0, 0])},
... {'v': '1:1:A:C', 'cm': 0.1, 's': 'c', 'GT': hl.Call([0, 1])},
... {'v': '1:1:A:C', 'cm': 0.1, 's': 'd', 'GT': hl.Call([1, 1])},
... {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'a', 'GT': hl.Call([0, 1])},
... {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'b', 'GT': hl.Call([1, 1])},
... {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'c', 'GT': hl.Call([0, 1])},
... {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'd', 'GT': hl.Call([0, 0])},
... {'v': '2:1:C:G', 'cm': 0.2, 's': 'a', 'GT': hl.Call([0, 1])},
... {'v': '2:1:C:G', 'cm': 0.2, 's': 'b', 'GT': hl.Call([0, 0])},
... {'v': '2:1:C:G', 'cm': 0.2, 's': 'c', 'GT': hl.Call([1, 1])},
... {'v': '2:1:C:G', 'cm': 0.2, 's': 'd', 'GT': hl.null(hl.tcall)}]
>>> ht = hl.Table.parallelize(data, hl.dtype('struct{v: str, s: str, cm: float64, GT: call}'))
>>> ht = ht.transmute(**hl.parse_variant(ht.v))
>>> mt = ht.to_matrix_table(row_key=['locus', 'alleles'], col_key=['s'], row_fields=['cm'])
Compute linkage disequilibrium between all pairs of variants on the same
contig and within two megabases:
>>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=2e6)
>>> ld.to_numpy()
array([[ 1. , -0.85280287, 0. ],
[-0.85280287, 1. , 0. ],
[ 0. , 0. , 1. ]])
    Within one megabase:
>>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=1e6)
>>> ld.to_numpy()
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
Within one centimorgan:
>>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=1.0, coord_expr=mt.cm)
>>> ld.to_numpy()
array([[ 1. , -0.85280287, 0. ],
[-0.85280287, 1. , 0. ],
[ 0. , 0. , 1. ]])
    Within one centimorgan, calculating only the upper triangle:
>>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=1.0, coord_expr=mt.cm)
>>> ld = ld.sparsify_triangle()
>>> ld.to_numpy()
array([[ 1. , -0.85280287, 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]])
Notes
-----
This method sparsifies the result of :meth:`row_correlation` using
:func:`.linalg.utils.locus_windows` and
:meth:`.BlockMatrix.sparsify_row_intervals`
in order to only compute linkage disequilibrium between nearby
variants. Use :meth:`row_correlation` directly to calculate correlation
without windowing.
More precisely, variants are 0-indexed by their order in the matrix table
(see :meth:`~hail.MatrixTable.add_row_index`). Each variant is regarded as a vector of
elements defined by `entry_expr`, typically the number of alternate alleles
or genotype dosage. Missing values are mean-imputed within variant.
The method produces a symmetric block-sparse matrix supported in a
neighborhood of the diagonal. If variants :math:`i` and :math:`j` are on the
same contig and within `radius` base pairs (inclusive) then the
:math:`(i, j)` element is their
`Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`__.
Otherwise, the :math:`(i, j)` element is ``0.0``.
Rows with a constant value (i.e., zero variance) will result in ``nan``
correlation values. To avoid this, first check that all variants vary or
filter out constant variants (for example, with the help of
:func:`.aggregators.stats`).
If the :meth:`.global_position` on `locus_expr` is not in ascending order,
this method will fail. Ascending order should hold for a matrix table keyed
by locus or variant (and the associated row table), or for a table that's
been ordered by `locus_expr`.
Set `coord_expr` to use a value other than position to define the windows.
This row-indexed numeric expression must be non-missing, non-``nan``, on the
same source as `locus_expr`, and ascending with respect to locus
position for each contig; otherwise the method will raise an error.
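
    For intuition, the per-row window boundaries can be computed with a binary
    search over the sorted coordinates of one contig; a rough sketch
    (illustrative only, not Hail's implementation):

    .. code-block:: python

        import numpy as np

        coords = np.array([0.1, 0.9, 1.15, 3.0])   # sorted coordinates on one contig
        radius = 1.0
        starts = np.searchsorted(coords, coords - radius, side='left')
        stops = np.searchsorted(coords, coords + radius, side='right')
        # row i is only correlated against rows in [starts[i], stops[i])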
Warning
-------
See the warnings in :meth:`row_correlation`. In particular, for large
matrices it may be preferable to run its stages separately.
`entry_expr` and `locus_expr` are implicitly aligned by row-index, though
they need not be on the same source. If their sources differ in the number
of rows, an error will be raised; otherwise, unintended misalignment may
silently produce unexpected results.
Parameters
----------
entry_expr : :class:`.Float64Expression`
Entry-indexed numeric expression on matrix table.
locus_expr : :class:`.LocusExpression`
Row-indexed locus expression on a table or matrix table that is
row-aligned with the matrix table of `entry_expr`.
radius: :obj:`int` or :obj:`float`
Radius of window for row values.
coord_expr: :class:`.Float64Expression`, optional
Row-indexed numeric expression for the row value on the same table or
matrix table as `locus_expr`.
By default, the row value is given by the locus position.
block_size : :obj:`int`, optional
Block size. Default given by :meth:`.BlockMatrix.default_block_size`.
Returns
-------
:class:`.BlockMatrix`
Windowed correlation matrix between variants.
Row and column indices correspond to matrix table variant index.
"""
starts_and_stops = hl.linalg.utils.locus_windows(locus_expr, radius, coord_expr, _localize=False)
starts_and_stops = hl.tuple([starts_and_stops[0].map(lambda i: hl.int64(i)), starts_and_stops[1].map(lambda i: hl.int64(i))])
ld = hl.row_correlation(entry_expr, block_size)
return ld._sparsify_row_intervals_expr(starts_and_stops, blocks_only=False)
@typecheck(n_populations=int,
n_samples=int,
n_variants=int,
n_partitions=nullable(int),
pop_dist=nullable(sequenceof(numeric)),
fst=nullable(sequenceof(numeric)),
af_dist=nullable(expr_any),
reference_genome=reference_genome_type,
mixture=bool)
def balding_nichols_model(n_populations, n_samples, n_variants, n_partitions=None,
pop_dist=None, fst=None, af_dist=None,
reference_genome='default', mixture=False) -> MatrixTable:
r"""Generate a matrix table of variants, samples, and genotypes using the
Balding-Nichols or Pritchard-Stephens-Donnelly model.
Examples
--------
Generate a matrix table of genotypes with 1000 variants and 100 samples
across 3 populations:
>>> bn_ds = hl.balding_nichols_model(3, 100, 1000, reference_genome='GRCh37')
Generate a matrix table using 4 populations, 40 samples, 150 variants, 3
partitions, population distribution ``[0.1, 0.2, 0.3, 0.4]``,
:math:`F_{ST}` values ``[.02, .06, .04, .12]``, ancestral allele
frequencies drawn from a truncated beta distribution with ``a = 0.01`` and
    ``b = 2.0`` over the interval ``[0.05, 1]``, and random seed 1:
>>> hl.set_global_seed(1)
>>> bn_ds = hl.balding_nichols_model(4, 40, 150, 3,
... pop_dist=[0.1, 0.2, 0.3, 0.4],
... fst=[.02, .06, .04, .12],
... af_dist=hl.rand_beta(a=0.01, b=2.0, lower=0.05, upper=1.0))
To guarantee reproducibility, we set the Hail global seed with
:func:`.set_global_seed` immediately prior to generating the dataset.
Notes
-----
This method simulates a matrix table of variants, samples, and genotypes
using the Balding-Nichols model, which we now define.
- :math:`K` populations are labeled by integers :math:`0, 1, \dots, K - 1`.
- :math:`N` samples are labeled by strings :math:`0, 1, \dots, N - 1`.
- :math:`M` variants are defined as ``1:1:A:C``, ``1:2:A:C``, ...,
``1:M:A:C``.
- The default distribution for population assignment :math:`\pi` is uniform.
- The default ancestral frequency distribution :math:`P_0` is uniform on
:math:`[0.1, 0.9]`.
- The default :math:`F_{ST}` values are all :math:`0.1`.
The Balding-Nichols model models genotypes of individuals from a structured
population comprising :math:`K` homogeneous modern populations that have
each diverged from a single ancestral population (a `star phylogeny`). Each
sample is assigned a population by sampling from the categorical
distribution :math:`\pi`. Note that the actual size of each population is
random.
Variants are modeled as biallelic and unlinked. Ancestral allele
frequencies are drawn independently for each variant from a frequency
spectrum :math:`P_0`. The extent of genetic drift of each modern population
from the ancestral population is defined by the corresponding :math:`F_{ST}`
parameter :math:`F_k` (here and below, lowercase indices run over a range
bounded by the corresponding uppercase parameter, e.g. :math:`k = 1, \ldots,
K`). For each variant and population, allele frequencies are drawn from a
`beta distribution <https://en.wikipedia.org/wiki/Beta_distribution>`__
whose parameters are determined by the ancestral allele frequency and
:math:`F_{ST}` parameter. The beta distribution gives a continuous
approximation of the effect of genetic drift. We denote sample population
assignments by :math:`k_n`, ancestral allele frequencies by :math:`p_m`,
population allele frequencies by :math:`p_{k, m}`, and diploid, unphased
genotype calls by :math:`g_{n, m}` (0, 1, and 2 correspond to homozygous
reference, heterozygous, and homozygous variant, respectively).
The generative model is then given by:
    .. math::

        \begin{aligned}
            k_n \,&\sim\, \pi \\
            p_m \,&\sim\, P_0 \\
            p_{k,m} \mid p_m \,&\sim\, \mathrm{Beta}(\mu = p_m,\, \sigma^2 = F_k p_m (1 - p_m)) \\
            g_{n,m} \mid k_n, p_{k, m} \,&\sim\, \mathrm{Binomial}(2, p_{k_n, m})
        \end{aligned}
    The beta distribution above is parameterized by its mean and variance; the
    usual shape parameters are :math:`a = p \frac{1 - F}{F}` and
    :math:`b = (1 - p) \frac{1 - F}{F}` with :math:`F = F_k` and :math:`p = p_m`.
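
    As an illustrative sanity check of this parameterization (not part of the
    Hail API), one can draw population allele frequencies and genotypes with
    NumPy:

    .. code-block:: python

        import numpy as np

        rng = np.random.default_rng(0)
        p, F = 0.3, 0.1                          # ancestral frequency and F_ST
        a, b = p * (1 - F) / F, (1 - p) * (1 - F) / F
        pk = rng.beta(a, b, size=100_000)        # population allele frequencies
        pk.mean(), pk.var()                      # approx. p and F * p * (1 - p)
        genotypes = rng.binomial(2, pk)          # one unphased diploid call per draw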
The resulting dataset has the following fields.
Global fields:
- `bn.n_populations` (:py:data:`.tint32`) -- Number of populations.
- `bn.n_samples` (:py:data:`.tint32`) -- Number of samples.
- `bn.n_variants` (:py:data:`.tint32`) -- Number of variants.
- `bn.n_partitions` (:py:data:`.tint32`) -- Number of partitions.
- `bn.pop_dist` (:class:`.tarray` of :py:data:`.tfloat64`) -- Population distribution indexed by
population.
- `bn.fst` (:class:`.tarray` of :py:data:`.tfloat64`) -- :math:`F_{ST}` values indexed by
population.
- `bn.seed` (:py:data:`.tint32`) -- Random seed.
- `bn.mixture` (:py:data:`.tbool`) -- Value of `mixture` parameter.
Row fields:
- `locus` (:class:`.tlocus`) -- Variant locus (key field).
- `alleles` (:class:`.tarray` of :py:data:`.tstr`) -- Variant alleles (key field).
- `ancestral_af` (:py:data:`.tfloat64`) -- Ancestral allele frequency.
- `af` (:class:`.tarray` of :py:data:`.tfloat64`) -- Modern allele frequencies indexed by
population.
Column fields:
- `sample_idx` (:py:data:`.tint32`) - Sample index (key field).
- `pop` (:py:data:`.tint32`) -- Population of sample.
Entry fields:
- `GT` (:py:data:`.tcall`) -- Genotype call (diploid, unphased).
For the `Pritchard-Stephens-Donnelly model <http://www.genetics.org/content/155/2/945.long>`__,
    set `mixture` to ``True`` to treat `pop_dist` as the parameters of the
Dirichlet distribution describing admixture between the modern populations.
In this case, the type of `pop` is :class:`.tarray` of
:py:data:`.tfloat64` and the value is the mixture proportions.
Parameters
----------
n_populations : :obj:`int`
Number of modern populations.
n_samples : :obj:`int`
Total number of samples.
n_variants : :obj:`int`
Number of variants.
n_partitions : :obj:`int`, optional
Number of partitions.
        Default is 1 partition per 2**27 (about 134 million) entries or 8,
        whichever is larger.
pop_dist : :obj:`list` of :obj:`float`, optional
Unnormalized population distribution, a list of length
`n_populations` with non-negative values.
Default is ``[1, ..., 1]``.
fst : :obj:`list` of :obj:`float`, optional
:math:`F_{ST}` values, a list of length `n_populations` with values
in (0, 1). Default is ``[0.1, ..., 0.1]``.
af_dist : :class:`.Float64Expression`, optional
Representing a random function. Ancestral allele frequency
distribution. Default is :func:`.rand_unif` over the range
`[0.1, 0.9]` with seed 0.
reference_genome : :class:`str` or :class:`.ReferenceGenome`
Reference genome to use.
mixture : :obj:`bool`
Treat `pop_dist` as the parameters of a Dirichlet distribution,
        as in the Pritchard-Stephens-Donnelly model.
Returns
-------
:class:`.MatrixTable`
Simulated matrix table of variants, samples, and genotypes.
"""
if pop_dist is None:
pop_dist = [1 for _ in range(n_populations)]
if fst is None:
fst = [0.1 for _ in range(n_populations)]
if af_dist is None:
af_dist = hl.rand_unif(0.1, 0.9, seed=0)
if n_partitions is None:
n_partitions = max(8, int(n_samples * n_variants / (128 * 1024 * 1024)))
# verify args
for name, var in {"populations": n_populations,
"samples": n_samples,
"variants": n_variants,
"partitions": n_partitions}.items():
if var < 1:
raise ValueError("n_{} must be positive, got {}".format(name, var))
for name, var in {"pop_dist": pop_dist, "fst": fst}.items():
if len(var) != n_populations:
raise ValueError("{} must be of length n_populations={}, got length {}"
.format(name, n_populations, len(var)))
if any(x < 0 for x in pop_dist):
raise ValueError("pop_dist must be non-negative, got {}"
.format(pop_dist))
if any(x <= 0 or x >= 1 for x in fst):
raise ValueError("elements of fst must satisfy 0 < x < 1, got {}"
.format(fst))
# verify af_dist
if not af_dist._is_scalar:
raise ExpressionException('balding_nichols_model expects af_dist to '
+ 'have scalar arguments: found expression '
+ 'from source {}'
.format(af_dist._indices.source))
if af_dist.dtype != tfloat64:
raise ValueError("af_dist must be a hail function with return type tfloat64.")
info("balding_nichols_model: generating genotypes for {} populations, {} samples, and {} variants..."
.format(n_populations, n_samples, n_variants))
# generate matrix table
bn = hl.utils.range_matrix_table(n_variants, n_samples, n_partitions)
bn = bn.annotate_globals(
bn=hl.struct(n_populations=n_populations,
n_samples=n_samples,
n_variants=n_variants,
n_partitions=n_partitions,
pop_dist=pop_dist,
fst=fst,
mixture=mixture))
# col info
pop_f = hl.rand_dirichlet if mixture else hl.rand_cat
bn = bn.key_cols_by(sample_idx=bn.col_idx)
bn = bn.select_cols(pop=pop_f(pop_dist))
# row info
bn = bn.key_rows_by(locus=hl.locus_from_global_position(bn.row_idx, reference_genome=reference_genome),
alleles=['A', 'C'])
bn = bn.select_rows(ancestral_af=af_dist,
af=hl.bind(lambda ancestral:
hl.array([(1 - x) / x for x in fst])
.map(lambda x:
hl.rand_beta(ancestral * x,
(1 - ancestral) * x)),
af_dist))
# entry info
p = hl.sum(bn.pop * bn.af) if mixture else bn.af[bn.pop]
idx = hl.rand_cat([(1 - p) ** 2, 2 * p * (1 - p), p ** 2])
return bn.select_entries(GT=hl.unphased_diploid_gt_index_call(idx))
@typecheck(mt=MatrixTable, f=anytype)
def filter_alleles(mt: MatrixTable,
f: Callable) -> MatrixTable:
"""Filter alternate alleles.
.. include:: ../_templates/req_tvariant.rst
Examples
--------
Keep SNPs:
>>> ds_result = hl.filter_alleles(ds, lambda allele, i: hl.is_snp(ds.alleles[0], allele))
Keep alleles with AC > 0:
>>> ds_result = hl.filter_alleles(ds, lambda a, allele_index: ds.info.AC[allele_index - 1] > 0)
Update the AC field of the resulting dataset:
>>> updated_info = ds_result.info.annotate(AC = ds_result.new_to_old.map(lambda i: ds_result.info.AC[i-1]))
>>> ds_result = ds_result.annotate_rows(info = updated_info)
Notes
-----
The following new fields are generated:
- `old_locus` (``locus``) -- The old locus, before filtering and computing
the minimal representation.
- `old_alleles` (``array<str>``) -- The old alleles, before filtering and
computing the minimal representation.
- `old_to_new` (``array<int32>``) -- An array that maps old allele index to
new allele index. Its length is the same as `old_alleles`. Alleles that
are filtered are missing.
- `new_to_old` (``array<int32>``) -- An array that maps new allele index to
the old allele index. Its length is the same as the modified `alleles`
field.
If all alternate alleles of a variant are filtered out, the variant itself
is filtered out.
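
    For example, a rough pure-Python illustration of the two index maps for a
    single row with alleles ``['A', 'C', 'AT']`` when only SNP alternate
    alleles are kept (illustrative only, not Hail code):

    .. code-block:: python

        old_alleles = ['A', 'C', 'AT']
        keep = [True] + [len(a) == 1 for a in old_alleles[1:]]   # ref always kept
        new_to_old = [i for i, k in enumerate(keep) if k]        # [0, 1]
        old_to_new = [new_to_old.index(i) if k else None         # [0, 1, None]
                      for i, k in enumerate(keep)]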
**Using** `f`
The `f` argument is a function or lambda evaluated per alternate allele to
determine whether that allele is kept. If `f` evaluates to ``True``, the
allele is kept. If `f` evaluates to ``False`` or missing, the allele is
removed.
`f` is a function that takes two arguments: the allele string (of type
:class:`.StringExpression`) and the allele index (of type
:class:`.Int32Expression`), and returns a boolean expression. This can
be either a defined function or a lambda. For example, these two usages
are equivalent:
(with a lambda)
>>> ds_result = hl.filter_alleles(ds, lambda allele, i: hl.is_snp(ds.alleles[0], allele))
(with a defined function)
>>> def filter_f(allele, allele_index):
... return hl.is_snp(ds.alleles[0], allele)
>>> ds_result = hl.filter_alleles(ds, filter_f)
Warning
-------
:func:`.filter_alleles` does not update any fields other than `locus` and
`alleles`. This means that row fields like allele count (AC) and entry
fields like allele depth (AD) can become meaningless unless they are also
updated. You can update them with :meth:`.annotate_rows` and
:meth:`.annotate_entries`.
See Also
--------
:func:`.filter_alleles_hts`
Parameters
----------
mt : :class:`.MatrixTable`
Dataset.
f : callable
Function from (allele: :class:`.StringExpression`, allele_index:
:class:`.Int32Expression`) to :class:`.BooleanExpression`
Returns
-------
:class:`.MatrixTable`
"""
require_row_key_variant(mt, 'filter_alleles')
inclusion = hl.range(0, hl.len(mt.alleles)).map(lambda i: (i == 0) | hl.bind(lambda ii: f(mt.alleles[ii], ii), i))
# old locus, old alleles, new to old, old to new
mt = mt.annotate_rows(__allele_inclusion=inclusion,
old_locus=mt.locus,
old_alleles=mt.alleles)
new_to_old = (hl.enumerate(mt.__allele_inclusion)
.filter(lambda elt: elt[1])
.map(lambda elt: elt[0]))
old_to_new_dict = (hl.dict(hl.enumerate(hl.enumerate(mt.alleles)
.filter(lambda elt: mt.__allele_inclusion[elt[0]]))
.map(lambda elt: (elt[1][1], elt[0]))))
old_to_new = hl.bind(lambda d: mt.alleles.map(lambda a: d.get(a)), old_to_new_dict)
mt = mt.annotate_rows(old_to_new=old_to_new, new_to_old=new_to_old)
new_locus_alleles = hl.min_rep(mt.locus, mt.new_to_old.map(lambda i: mt.alleles[i]))
mt = mt.annotate_rows(__new_locus=new_locus_alleles.locus, __new_alleles=new_locus_alleles.alleles)
mt = mt.filter_rows(hl.len(mt.__new_alleles) > 1)
left = mt.filter_rows((mt.locus == mt.__new_locus) & (mt.alleles == mt.__new_alleles))
right = mt.filter_rows((mt.locus != mt.__new_locus) | (mt.alleles != mt.__new_alleles))
right = right.key_rows_by(locus=right.__new_locus, alleles=right.__new_alleles)
return left.union_rows(right, _check_cols=False).drop('__allele_inclusion', '__new_locus', '__new_alleles')
@typecheck(mt=MatrixTable, f=anytype, subset=bool)
def filter_alleles_hts(mt: MatrixTable,
f: Callable,
subset: bool = False) -> MatrixTable:
"""Filter alternate alleles and update standard GATK entry fields.
Examples
--------
Filter to SNP alleles using the subset strategy:
>>> ds_result = hl.filter_alleles_hts(
... ds,
... lambda allele, _: hl.is_snp(ds.alleles[0], allele),
... subset=True)
Update the AC field of the resulting dataset:
>>> updated_info = ds_result.info.annotate(AC = ds_result.new_to_old.map(lambda i: ds_result.info.AC[i-1]))
>>> ds_result = ds_result.annotate_rows(info = updated_info)
Notes
-----
For usage of the `f` argument, see the :func:`.filter_alleles`
documentation.
:func:`.filter_alleles_hts` requires the dataset have the GATK VCF schema,
namely the following entry fields in this order:
.. code-block:: text
GT: call
AD: array<int32>
DP: int32
GQ: int32
PL: array<int32>
Use :meth:`.MatrixTable.select_entries` to rearrange these fields if
necessary.
The following new fields are generated:
- `old_locus` (``locus``) -- The old locus, before filtering and computing
the minimal representation.
- `old_alleles` (``array<str>``) -- The old alleles, before filtering and
computing the minimal representation.
- `old_to_new` (``array<int32>``) -- An array that maps old allele index to
new allele index. Its length is the same as `old_alleles`. Alleles that
are filtered are missing.
- `new_to_old` (``array<int32>``) -- An array that maps new allele index to
the old allele index. Its length is the same as the modified `alleles`
field.
**Downcode algorithm**
We will illustrate the behavior on the example genotype below
when filtering the first alternate allele (allele 1) at a site
with 1 reference allele and 2 alternate alleles.
.. code-block:: text
GT: 1/2
GQ: 10
AD: 0,50,35
0 | 1000
1 | 1000 10
2 | 1000 0 20
+-----------------
0 1 2
    The downcode algorithm recodes occurrences of filtered alleles
    to occurrences of the reference allele (e.g. 1 -> 0 in our
example). So the depths of filtered alleles in the AD field
are added to the depth of the reference allele. Where
downcoding filtered alleles merges distinct genotypes, the
minimum PL is used (since PL is on a log scale, this roughly
corresponds to adding probabilities). The PLs are then
re-normalized (shifted) so that the most likely genotype has a
PL of 0, and GT is set to this genotype. If an allele is
filtered, this algorithm acts similarly to
:func:`.split_multi_hts`.
The downcode algorithm would produce the following:
.. code-block:: text
        GT: 0/1
        GQ: 10
        AD: 50,35

          0 | 10
          1 | 0    20
            +-----------
              0    1
In summary:
- GT: Downcode filtered alleles to reference.
- AD: Columns of filtered alleles are eliminated and their
values are added to the reference column, e.g., filtering
alleles 1 and 2 transforms ``25,5,10,20`` to ``40,20``.
- DP: No change.
- PL: Downcode filtered alleles to reference, combine PLs
using minimum for each overloaded genotype, and shift so
the overall minimum PL is 0.
- GQ: The second-lowest PL (after shifting).
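
    A rough pure-Python sketch of the PL downcoding for the example above
    (illustrative only; Hail's implementation operates on expressions):

    .. code-block:: python

        old_pl = {(0, 0): 1000, (0, 1): 1000, (1, 1): 10,
                  (0, 2): 1000, (1, 2): 0, (2, 2): 20}
        old_to_new = {0: 0, 1: 0, 2: 1}               # filtered allele 1 -> reference
        new_pl = {}
        for (a, b), pl in old_pl.items():
            g = tuple(sorted((old_to_new[a], old_to_new[b])))
            new_pl[g] = min(new_pl.get(g, pl), pl)    # min PL over merged genotypes
        shift = min(new_pl.values())
        new_pl = {g: pl - shift for g, pl in new_pl.items()}
        # new_pl == {(0, 0): 10, (0, 1): 0, (1, 1): 20}; GT is the argmin and GQ
        # the second-lowest value.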
**Subset algorithm**
We will illustrate the behavior on the example genotype below
when filtering the first alternate allele (allele 1) at a site
with 1 reference allele and 2 alternate alleles.
.. code-block:: text
GT: 1/2
GQ: 10
AD: 0,50,35
0 | 1000
1 | 1000 10
2 | 1000 0 20
+-----------------
0 1 2
The subset algorithm subsets the AD and PL arrays
(i.e. removes entries corresponding to filtered alleles) and
then sets GT to the genotype with the minimum PL. Note that
if the genotype changes (as in the example), the PLs are
re-normalized (shifted) so that the most likely genotype has a
PL of 0. Qualitatively, subsetting corresponds to the belief
that the filtered alleles are not real so we should discard
any probability mass associated with them.
The subset algorithm would produce the following:
.. code-block:: text
GT: 1/1
GQ: 980
        AD: 0,35
0 | 980
1 | 980 0
+-----------
0 1
In summary:
- GT: Set to most likely genotype based on the PLs ignoring
the filtered allele(s).
- AD: The filtered alleles' columns are eliminated, e.g.,
filtering alleles 1 and 2 transforms ``25,5,10,20`` to
``25,20``.
- DP: Unchanged.
- PL: Columns involving filtered alleles are eliminated and
the remaining columns' values are shifted so the minimum
value is 0.
- GQ: The second-lowest PL (after shifting).
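
    A rough pure-Python sketch of the PL subsetting for the example above
    (illustrative only; Hail's implementation operates on expressions):

    .. code-block:: python

        old_pl = {(0, 0): 1000, (0, 1): 1000, (1, 1): 10,
                  (0, 2): 1000, (1, 2): 0, (2, 2): 20}
        kept = [0, 2]                                 # old allele indices that remain
        remap = {old: new for new, old in enumerate(kept)}
        new_pl = {(remap[a], remap[b]): pl
                  for (a, b), pl in old_pl.items()
                  if a in remap and b in remap}
        shift = min(new_pl.values())
        new_pl = {g: pl - shift for g, pl in new_pl.items()}
        # new_pl == {(0, 0): 980, (0, 1): 980, (1, 1): 0}; GT becomes 1/1, GQ 980.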
Warning
-------
:func:`.filter_alleles_hts` does not update any row fields other than
`locus` and `alleles`. This means that row fields like allele count (AC) can
become meaningless unless they are also updated. You can update them with
:meth:`.annotate_rows`.
See Also
--------
:func:`.filter_alleles`
Parameters
----------
mt : :class:`.MatrixTable`
f : callable
Function from (allele: :class:`.StringExpression`, allele_index:
:class:`.Int32Expression`) to :class:`.BooleanExpression`
subset : :obj:`.bool`
Subset PL field if ``True``, otherwise downcode PL field. The
calculation of GT and GQ also depend on whether one subsets or
downcodes the PL.
Returns
-------
:class:`.MatrixTable`
"""
if mt.entry.dtype != hl.hts_entry_schema:
raise FatalError("'filter_alleles_hts': entry schema must be the HTS entry schema:\n"
" found: {}\n"
" expected: {}\n"
" Use 'hl.filter_alleles' to split entries with non-HTS entry fields.".format(
mt.entry.dtype, hl.hts_entry_schema))
mt = filter_alleles(mt, f)
if subset:
newPL = hl.cond(
hl.is_defined(mt.PL),
hl.bind(
lambda unnorm: unnorm - hl.min(unnorm),
hl.range(0, hl.triangle(mt.alleles.length())).map(
lambda newi: hl.bind(
lambda newc: mt.PL[hl.call(mt.new_to_old[newc[0]],
mt.new_to_old[newc[1]]).unphased_diploid_gt_index()],
hl.unphased_diploid_gt_index_call(newi)))),
hl.null(tarray(tint32)))
return mt.annotate_entries(
GT=hl.unphased_diploid_gt_index_call(hl.argmin(newPL, unique=True)),
AD=hl.cond(
hl.is_defined(mt.AD),
hl.range(0, mt.alleles.length()).map(
lambda newi: mt.AD[mt.new_to_old[newi]]),
hl.null(tarray(tint32))),
# DP unchanged
GQ=hl.gq_from_pl(newPL),
PL=newPL)
# otherwise downcode
else:
mt = mt.annotate_rows(__old_to_new_no_na=mt.old_to_new.map(lambda x: hl.or_else(x, 0)))
newPL = hl.cond(
hl.is_defined(mt.PL),
(hl.range(0, hl.triangle(hl.len(mt.alleles)))
.map(lambda newi: hl.min(hl.range(0, hl.triangle(hl.len(mt.old_alleles)))
.filter(lambda oldi: hl.bind(
lambda oldc: hl.call(mt.__old_to_new_no_na[oldc[0]],
mt.__old_to_new_no_na[oldc[1]]) == hl.unphased_diploid_gt_index_call(newi),
hl.unphased_diploid_gt_index_call(oldi)))
.map(lambda oldi: mt.PL[oldi])))),
hl.null(tarray(tint32)))
return mt.annotate_entries(
GT=hl.call(mt.__old_to_new_no_na[mt.GT[0]],
mt.__old_to_new_no_na[mt.GT[1]]),
AD=hl.cond(
hl.is_defined(mt.AD),
(hl.range(0, hl.len(mt.alleles))
.map(lambda newi: hl.sum(hl.range(0, hl.len(mt.old_alleles))
.filter(lambda oldi: mt.__old_to_new_no_na[oldi] == newi)
.map(lambda oldi: mt.AD[oldi])))),
hl.null(tarray(tint32))),
# DP unchanged
GQ=hl.gq_from_pl(newPL),
PL=newPL).drop('__old_to_new_no_na')
@typecheck(mt=MatrixTable,
call_field=str,
r2=numeric,
bp_window_size=int,
memory_per_core=int)
def _local_ld_prune(mt, call_field, r2=0.2, bp_window_size=1000000, memory_per_core=256):
bytes_per_core = memory_per_core * 1024 * 1024
fraction_memory_to_use = 0.25
variant_byte_overhead = 50
genotypes_per_pack = 32
n_samples = mt.count_cols()
min_bytes_per_core = math.ceil((1 / fraction_memory_to_use) * 8 * n_samples + variant_byte_overhead)
if bytes_per_core < min_bytes_per_core:
raise ValueError("memory_per_core must be greater than {} MB".format(min_bytes_per_core // (1024 * 1024)))
bytes_per_variant = math.ceil(8 * n_samples / genotypes_per_pack) + variant_byte_overhead
bytes_available_per_core = bytes_per_core * fraction_memory_to_use
max_queue_size = int(max(1.0, math.ceil(bytes_available_per_core / bytes_per_variant)))
info(f'ld_prune: running local pruning stage with max queue size of {max_queue_size} variants')
return Table(ir.MatrixToTableApply(mt._mir, {
'name': 'LocalLDPrune',
'callField': call_field,
'r2Threshold': float(r2),
'windowSize': bp_window_size,
'maxQueueSize': max_queue_size
}))
@typecheck(call_expr=expr_call,
r2=numeric,
bp_window_size=int,
memory_per_core=int,
keep_higher_maf=bool,
block_size=nullable(int))
def ld_prune(call_expr, r2=0.2, bp_window_size=1000000, memory_per_core=256, keep_higher_maf=True, block_size=None):
"""Returns a maximal subset of variants that are nearly uncorrelated within each window.
.. include:: ../_templates/req_diploid_gt.rst
.. include:: ../_templates/req_biallelic.rst
.. include:: ../_templates/req_tvariant.rst
Examples
--------
Prune variants in linkage disequilibrium by filtering a dataset to those variants returned
by :func:`.ld_prune`. If the dataset contains multiallelic variants, the multiallelic variants
must be filtered out or split before being passed to :func:`.ld_prune`.
>>> biallelic_dataset = dataset.filter_rows(hl.len(dataset.alleles) == 2)
>>> pruned_variant_table = hl.ld_prune(biallelic_dataset.GT, r2=0.2, bp_window_size=500000)
>>> filtered_ds = dataset.filter_rows(hl.is_defined(pruned_variant_table[dataset.row_key]))
Notes
-----
This method finds a maximal subset of variants such that the squared Pearson
correlation coefficient :math:`r^2` of any pair at most `bp_window_size`
base pairs apart is strictly less than `r2`. Each variant is represented as
a vector over samples with elements given by the (mean-imputed) number of
alternate alleles. In particular, even if present, **phase information is
ignored**. Variants that do not vary across samples are dropped.
The method prunes variants in linkage disequilibrium in three stages.
- The first, "local pruning" stage prunes correlated variants within each
partition, using a local variant queue whose size is determined by
`memory_per_core`. A larger queue may facilitate more local pruning in
this stage. Minor allele frequency is not taken into account. The
parallelism is the number of matrix table partitions.
- The second, "global correlation" stage uses block-sparse matrix
multiplication to compute correlation between each pair of remaining
variants within `bp_window_size` base pairs, and then forms a graph of
correlated variants. The parallelism of writing the locally-pruned matrix
table as a block matrix is ``n_locally_pruned_variants / block_size``.
- The third, "global pruning" stage applies :func:`.maximal_independent_set`
to prune variants from this graph until no edges remain. This algorithm
iteratively removes the variant with the highest vertex degree. If
`keep_higher_maf` is true, then in the case of a tie for highest degree,
the variant with lowest minor allele frequency is removed.
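
    A rough pure-Python sketch of the greedy idea behind the third stage
    (illustrative only; :func:`.maximal_independent_set` is the actual
    implementation):

    .. code-block:: python

        edges = {(0, 1), (1, 2), (2, 3)}         # hypothetical correlated pairs
        neighbors = {}
        for i, j in edges:
            neighbors.setdefault(i, set()).add(j)
            neighbors.setdefault(j, set()).add(i)
        removed = set()
        while any(neighbors[v] - removed for v in neighbors if v not in removed):
            # drop the variant with the most remaining correlated neighbors
            v = max((v for v in neighbors if v not in removed),
                    key=lambda v: len(neighbors[v] - removed))
            removed.add(v)
        kept = sorted(v for v in neighbors if v not in removed)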
Warning
-------
The locally-pruned matrix table and block matrix are stored as temporary files
on persistent disk. See the warnings on `BlockMatrix.from_entry_expr` with
regard to memory and Hadoop replication errors.
Parameters
----------
call_expr : :class:`.CallExpression`
Entry-indexed call expression on a matrix table with row-indexed
variants and column-indexed samples.
r2 : :obj:`float`
Squared correlation threshold (exclusive upper bound).
Must be in the range [0.0, 1.0].
bp_window_size: :obj:`int`
Window size in base pairs (inclusive upper bound).
memory_per_core : :obj:`int`
Memory in MB per core for local pruning queue.
    keep_higher_maf: :obj:`bool`
If ``True``, break ties at each step of the global pruning stage by
preferring to keep variants with higher minor allele frequency.
block_size: :obj:`int`, optional
Block size for block matrices in the second stage.
Default given by :meth:`.BlockMatrix.default_block_size`.
Returns
-------
:class:`.Table`
Table of a maximal independent set of variants.
"""
if block_size is None:
block_size = BlockMatrix.default_block_size()
if not 0.0 <= r2 <= 1:
raise ValueError(f'r2 must be in the range [0.0, 1.0], found {r2}')
if bp_window_size < 0:
raise ValueError(f'bp_window_size must be non-negative, found {bp_window_size}')
check_entry_indexed('ld_prune/call_expr', call_expr)
mt = matrix_table_source('ld_prune/call_expr', call_expr)
require_row_key_variant(mt, 'ld_prune')
# FIXME: remove once select_entries on a field is free
if call_expr in mt._fields_inverse:
field = mt._fields_inverse[call_expr]
else:
field = Env.get_uid()
mt = mt.select_entries(**{field: call_expr})
mt = mt.select_rows().select_cols()
mt = mt.distinct_by_row()
locally_pruned_table_path = new_temp_file()
(_local_ld_prune(require_biallelic(mt, 'ld_prune'), field, r2, bp_window_size, memory_per_core)
.write(locally_pruned_table_path, overwrite=True))
locally_pruned_table = hl.read_table(locally_pruned_table_path).add_index()
mt = mt.annotate_rows(info=locally_pruned_table[mt.row_key])
mt = mt.filter_rows(hl.is_defined(mt.info)).unfilter_entries()
std_gt_bm = BlockMatrix.from_entry_expr(
hl.or_else(
(mt[field].n_alt_alleles() - mt.info.mean) * mt.info.centered_length_rec,
0.0),
block_size=block_size)
r2_bm = (std_gt_bm @ std_gt_bm.T) ** 2
_, stops = hl.linalg.utils.locus_windows(locally_pruned_table.locus, bp_window_size)
entries = r2_bm.sparsify_row_intervals(range(stops.size), stops, blocks_only=True).entries(keyed=False)
entries = entries.filter((entries.entry >= r2) & (entries.i < entries.j))
entries = entries.select(i=hl.int32(entries.i), j=hl.int32(entries.j))
if keep_higher_maf:
fields = ['mean', 'locus']
else:
fields = ['locus']
info = locally_pruned_table.aggregate(
hl.agg.collect(locally_pruned_table.row.select('idx', *fields)), _localize=False)
info = hl.sorted(info, key=lambda x: x.idx)
entries = entries.annotate_globals(info=info)
entries = entries.filter(
(entries.info[entries.i].locus.contig == entries.info[entries.j].locus.contig)
& (entries.info[entries.j].locus.position - entries.info[entries.i].locus.position <= bp_window_size))
if keep_higher_maf:
entries = entries.annotate(
i=hl.struct(idx=entries.i,
twice_maf=hl.min(entries.info[entries.i].mean, 2.0 - entries.info[entries.i].mean)),
j=hl.struct(idx=entries.j,
twice_maf=hl.min(entries.info[entries.j].mean, 2.0 - entries.info[entries.j].mean)))
def tie_breaker(left, right):
return hl.sign(right.twice_maf - left.twice_maf)
else:
tie_breaker = None
variants_to_remove = hl.maximal_independent_set(
entries.i, entries.j, keep=False, tie_breaker=tie_breaker, keyed=False)
locally_pruned_table = locally_pruned_table.annotate_globals(
variants_to_remove=variants_to_remove.aggregate(
hl.agg.collect_as_set(variants_to_remove.node.idx), _localize=False))
return locally_pruned_table.filter(
locally_pruned_table.variants_to_remove.contains(hl.int32(locally_pruned_table.idx)),
keep=False
).select().persist()
def _warn_if_no_intercept(caller, covariates):
if all([e._indices.axes for e in covariates]):
warning(f'{caller}: model appears to have no intercept covariate.'
'\n To include an intercept, add 1.0 to the list of covariates.')
return True
return False
|
mit
|
mknorps/pp_TCF
|
MODULES/homfigs.py
|
1
|
1666
|
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# File name: homfigs.py
# Created by: gemusia
# Creation date: 30-06-2017
# Last modified: 18-07-2017 11:59:53
# Purpose: module for creating matplotlib figures of
# statistics created with module Channel
# from 'homstat.py'
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import numpy as np
import matplotlib.pyplot as plt
# class of figures for channel flow
# with one subfigure
class Homfig:
# **kwargs_axes: list of axes features for ax1.set_$(STH)
# possible keys:
# title,xlabel,ylabel,xlim,ylim,xscale,yscale
def __init__(self,**kwargs_axes):
self.kwargs_axes = kwargs_axes
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.plt_data = []
#default values
self.ax.set_xlabel('$y^{+}$')
self.ax.set_xlim([0,160])
#parameters read from the input
        for key, val in self.kwargs_axes.items():
getattr(self.ax,'set_'+key)(val)
def add_plot(self,*plt_args,**plt_kwargs):
self.plt_data.append((plt_args,plt_kwargs))
def hdraw(self,leg_loc=0):
for args,kwargs in self.plt_data:
# *args - unpacked as positional arguments
self.ax.plot(*args,**kwargs)
leg = self.ax.legend(loc=leg_loc)
def save(self,name):
self.fig.savefig(name)
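

# A minimal, illustrative usage sketch (hypothetical data; not part of the
# original module):
if __name__ == "__main__":
    yplus = np.linspace(0.0, 160.0, 50)
    umean = np.log(yplus + 1.0)          # placeholder mean-velocity profile
    demo = Homfig(title="mean velocity", ylabel="$U^{+}$")
    demo.add_plot(yplus, umean, "k-", label="model")
    demo.hdraw()
    demo.save("homfig_demo.png")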
'''
ax1.plot(DataFiles[cntr][:,0],DataFiles[cntr][:,1], 'k-^', label='model')
ax1.plot(DataFiles_LES[cntr][:,0],DataFiles_LES[cntr][:,1], '-', label='LES')
ax1.plot(DataFiles_DNS[cntr][:,0],DataFiles_DNS[cntr][:,1], '--', label='DNS')
'''
|
mit
|
superbobry/pymc3
|
pymc3/plots.py
|
1
|
17174
|
import numpy as np
from scipy.stats import kde
from .stats import *
from numpy.linalg import LinAlgError
__all__ = ['traceplot', 'kdeplot', 'kde2plot', 'forestplot', 'autocorrplot']
def traceplot(trace, varnames=None, figsize=None,
lines=None, combined=False, grid=True,
alpha=0.35, ax=None):
"""Plot samples histograms and values
Parameters
----------
trace : result of MCMC run
varnames : list of variable names
        Variables to be plotted; if None, all variables are plotted
figsize : figure size tuple
If None, size is (12, num of variables * 2) inch
lines : dict
Dictionary of variable name / value to be overplotted as vertical
lines to the posteriors and horizontal lines on sample values
e.g. mean of posteriors, true values of a simulation
combined : bool
Flag for combining multiple chains into a single chain. If False
(default), chains will be plotted separately.
grid : bool
Flag for adding gridlines to histogram. Defaults to True.
ax : axes
Matplotlib axes. Defaults to None.
Returns
-------
ax : matplotlib axes
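
    Examples
    --------
    A minimal, hypothetical usage sketch (the model and variable names here are
    illustrative only):

    .. code-block:: python

        import pymc3 as pm

        with pm.Model():
            pm.Normal('mu', mu=0, sd=1)
            step = pm.Metropolis()
            trace = pm.sample(500, step)

        pm.traceplot(trace, varnames=['mu'])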
"""
import matplotlib.pyplot as plt
if varnames is None:
varnames = trace.varnames
n = len(varnames)
if figsize is None:
figsize = (12, n*2)
if ax is None:
fig, ax = plt.subplots(n, 2, squeeze=False, figsize=figsize)
elif ax.shape != (n,2):
print('traceplot requires n*2 subplots')
return None
for i, v in enumerate(varnames):
for d in trace.get_values(v, combine=combined, squeeze=False):
d = np.squeeze(d)
d = make_2d(d)
if d.dtype.kind == 'i':
histplot_op(ax[i, 0], d, alpha=alpha)
else:
kdeplot_op(ax[i, 0], d)
ax[i, 0].set_title(str(v))
ax[i, 0].grid(grid)
ax[i, 1].set_title(str(v))
ax[i, 1].plot(d, alpha=alpha)
ax[i, 0].set_ylabel("Frequency")
ax[i, 1].set_ylabel("Sample value")
if lines:
try:
ax[i, 0].axvline(x=lines[v], color="r", lw=1.5)
ax[i, 1].axhline(y=lines[v], color="r", lw=1.5, alpha=alpha)
except KeyError:
pass
plt.tight_layout()
return ax
def histplot_op(ax, data, alpha=.35):
for i in range(data.shape[1]):
d = data[:, i]
mind = np.min(d)
maxd = np.max(d)
step = max((maxd-mind)//100, 1)
ax.hist(d, bins=range(mind, maxd + 2, step), alpha=alpha, align='left')
ax.set_xlim(mind - .5, maxd + .5)
def kdeplot_op(ax, data):
errored = []
for i in range(data.shape[1]):
d = data[:, i]
try:
density = kde.gaussian_kde(d)
l = np.min(d)
u = np.max(d)
x = np.linspace(0, 1, 100) * (u - l) + l
ax.plot(x, density(x))
except LinAlgError:
errored.append(i)
if errored:
ax.text(.27,.47, 'WARNING: KDE plot failed for: ' + str(errored), style='italic',
bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
def make_2d(a):
"""Ravel the dimensions after the first.
"""
a = np.atleast_2d(a.T).T
#flatten out dimensions beyond the first
n = a.shape[0]
newshape = np.product(a.shape[1:]).astype(int)
a = a.reshape((n, newshape), order='F')
return a
def kde2plot_op(ax, x, y, grid=200):
    import matplotlib.pyplot as plt
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
grid = grid * 1j
X, Y = np.mgrid[xmin:xmax:grid, ymin:ymax:grid]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = kde.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
extent=[xmin, xmax, ymin, ymax])
def kdeplot(data, ax=None):
    import matplotlib.pyplot as plt
    if ax is None:
        f, ax = plt.subplots(1, 1, squeeze=True)
kdeplot_op(ax, data)
return ax
def kde2plot(x, y, grid=200, ax=None):
    import matplotlib.pyplot as plt
    if ax is None:
        f, ax = plt.subplots(1, 1, squeeze=True)
kde2plot_op(ax, x, y, grid)
return ax
def autocorrplot(trace, varnames=None, max_lag=100, burn=0,
symmetric_plot=False, ax=None, figsize=None):
"""Bar plot of the autocorrelation function for a trace
Parameters
----------
trace : result of MCMC run
varnames : list of variable names
        Variables to be plotted; if None, all variables are plotted.
Vector-value stochastics are handled automatically.
max_lag : int
Maximum lag to calculate autocorrelation. Defaults to 100.
burn : int
Number of samples to discard from the beginning of the trace.
Defaults to 0.
symmetric_plot : boolean
        Plot from either [0, +lag] or [-lag, lag]. Defaults to False ([0, +lag]).
ax : axes
Matplotlib axes. Defaults to None.
figsize : figure size tuple
If None, size is (12, num of variables * 2) inches.
Note this is not used if ax is supplied.
Returns
-------
ax : matplotlib axes
"""
import matplotlib.pyplot as plt
def _handle_array_varnames(varname):
if trace[0][varname].__class__ is np.ndarray:
k = trace[varname].shape[1]
for i in range(k):
yield varname + '_{0}'.format(i)
else:
yield varname
if varnames is None:
varnames = trace.varnames
else:
varnames = [str(v) for v in varnames]
varnames = [item for sub in [[i for i in _handle_array_varnames(v)]
for v in varnames] for item in sub]
nchains = trace.nchains
if figsize is None:
figsize = (12, len(varnames)*2)
if ax is None:
fig, ax = plt.subplots(len(varnames), nchains, squeeze=False,
sharex=True, sharey=True, figsize=figsize)
elif ax.shape != (len(varnames), nchains):
raise ValueError('autocorrplot requires {}*{} subplots'.format(
len(varnames), nchains))
return None
max_lag = min(len(trace) - 1, max_lag)
for i, v in enumerate(varnames):
for j in range(nchains):
try:
d = np.squeeze(trace.get_values(v, chains=[j], burn=burn,
combine=False))
except KeyError:
k = int(v.split('_')[-1])
v_use = '_'.join(v.split('_')[:-1])
d = np.squeeze(trace.get_values(v_use, chains=[j],
burn=burn, combine=False)[:, k])
ax[i, j].acorr(d, detrend=plt.mlab.detrend_mean, maxlags=max_lag)
if not j:
ax[i, j].set_ylabel("correlation")
if i == len(varnames) - 1:
ax[i, j].set_xlabel("lag")
ax[i, j].set_title(v)
if not symmetric_plot:
ax[i, j].set_xlim(0, max_lag)
if nchains > 1:
ax[i, j].set_title("chain {0}".format(j+1))
return ax
def var_str(name, shape):
"""Return a sequence of strings naming the element of the tallyable object.
This is a support function for forestplot.
:Example:
>>> var_str('theta', (4,))
['theta[1]', 'theta[2]', 'theta[3]', 'theta[4]']
"""
size = np.prod(shape)
ind = (np.indices(shape) + 1).reshape(-1, size)
names = ['[' + ','.join(map(str, i)) + ']' for i in zip(*ind)]
# if len(name)>12:
# name = '\n'.join(name.split('_'))
# name += '\n'
names[0] = '%s %s' % (name, names[0])
return names
def forestplot(trace_obj, varnames=None, alpha=0.05, quartiles=True, rhat=True,
main=None, xtitle=None, xrange=None, ylabels=None,
chain_spacing=0.05, vline=0, gs=None):
""" Forest plot (model summary plot)
Generates a "forest plot" of 100*(1-alpha)% credible intervals for either
the set of variables in a given model, or a specified set of nodes.
:Arguments:
trace_obj: NpTrace or MultiTrace object
Trace(s) from an MCMC sample.
varnames: list
List of variables to plot (defaults to None, which results in all
variables plotted).
alpha (optional): float
Alpha value for (1-alpha)*100% credible intervals (defaults to
0.05).
quartiles (optional): bool
Flag for plotting the interquartile range, in addition to the
(1-alpha)*100% intervals (defaults to True).
rhat (optional): bool
Flag for plotting Gelman-Rubin statistics. Requires 2 or more
chains (defaults to True).
main (optional): string
Title for main plot. Passing False results in titles being
suppressed; passing None (default) results in default titles.
xtitle (optional): string
Label for x-axis. Defaults to no label
xrange (optional): list or tuple
Range for x-axis. Defaults to matplotlib's best guess.
ylabels (optional): list or array
User-defined labels for each variable. If not provided, the node
__name__ attributes are used.
chain_spacing (optional): float
Plot spacing between chains (defaults to 0.05).
vline (optional): numeric
Location of vertical reference line (defaults to 0).
gs : GridSpec
Matplotlib GridSpec object. Defaults to None.
Returns
-------
gs : matplotlib GridSpec
"""
import matplotlib.pyplot as plt
from matplotlib import gridspec
# Quantiles to be calculated
qlist = [100 * alpha / 2, 50, 100 * (1 - alpha / 2)]
if quartiles:
qlist = [100 * alpha / 2, 25, 50, 75, 100 * (1 - alpha / 2)]
# Range for x-axis
plotrange = None
# Number of chains
chains = None
# Subplots
interval_plot = None
rhat_plot = None
nchains = trace_obj.nchains
if nchains > 1:
from .diagnostics import gelman_rubin
R = gelman_rubin(trace_obj)
if varnames is not None:
R = {v: R[v] for v in varnames}
else:
# Can't calculate Gelman-Rubin with a single trace
rhat = False
if varnames is None:
varnames = trace_obj.varnames
# Empty list for y-axis labels
labels = []
if gs is None:
# Initialize plot
if rhat and nchains > 1:
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
else:
gs = gridspec.GridSpec(1, 1)
# Subplot for confidence intervals
interval_plot = plt.subplot(gs[0])
trace_quantiles = quantiles(trace_obj, qlist, squeeze=False)
hpd_intervals = hpd(trace_obj, alpha, squeeze=False)
for j, chain in enumerate(trace_obj.chains):
# Counter for current variable
var = 1
for varname in varnames:
var_quantiles = trace_quantiles[chain][varname]
quants = [var_quantiles[v] for v in qlist]
var_hpd = hpd_intervals[chain][varname].T
# Substitute HPD interval for quantile
quants[0] = var_hpd[0].T
quants[-1] = var_hpd[1].T
# Ensure x-axis contains range of current interval
if plotrange:
plotrange = [min(
plotrange[0],
np.min(quants)),
max(plotrange[1],
np.max(quants))]
else:
plotrange = [np.min(quants), np.max(quants)]
# Number of elements in current variable
value = trace_obj.get_values(varname, chains=[chain])[0]
k = np.size(value)
# Append variable name(s) to list
if not j:
if k > 1:
names = var_str(varname, np.shape(value))
labels += names
else:
labels.append(varname)
# labels.append('\n'.join(varname.split('_')))
# Add spacing for each chain, if more than one
e = [0] + [(chain_spacing * ((i + 2) / 2)) *
(-1) ** i for i in range(nchains - 1)]
# Deal with multivariate nodes
if k > 1:
for i, q in enumerate(np.transpose(quants).squeeze()):
# Y coordinate with jitter
y = -(var + i) + e[j]
if quartiles:
# Plot median
plt.plot(q[2], y, 'bo', markersize=4)
# Plot quartile interval
plt.errorbar(
x=(q[1],
q[3]),
y=(y,
y),
linewidth=2,
color='b')
else:
# Plot median
plt.plot(q[1], y, 'bo', markersize=4)
# Plot outer interval
plt.errorbar(
x=(q[0],
q[-1]),
y=(y,
y),
linewidth=1,
color='b')
else:
# Y coordinate with jitter
y = -var + e[j]
if quartiles:
# Plot median
plt.plot(quants[2], y, 'bo', markersize=4)
# Plot quartile interval
plt.errorbar(
x=(quants[1],
quants[3]),
y=(y,
y),
linewidth=2,
color='b')
else:
# Plot median
plt.plot(quants[1], y, 'bo', markersize=4)
# Plot outer interval
plt.errorbar(
x=(quants[0],
quants[-1]),
y=(y,
y),
linewidth=1,
color='b')
# Increment index
var += k
labels = ylabels if ylabels is not None else labels
# Update margins
left_margin = np.max([len(x) for x in labels]) * 0.015
gs.update(left=left_margin, right=0.95, top=0.9, bottom=0.05)
# Define range of y-axis
plt.ylim(-var + 0.5, -0.5)
datarange = plotrange[1] - plotrange[0]
plt.xlim(plotrange[0] - 0.05 * datarange, plotrange[1] + 0.05 * datarange)
# Add variable labels
plt.yticks([-(l + 1) for l in range(len(labels))], labels)
# Add title
if main is not False:
plot_title = main or str(int((
1 - alpha) * 100)) + "% Credible Intervals"
plt.title(plot_title)
# Add x-axis label
if xtitle is not None:
plt.xlabel(xtitle)
# Constrain to specified range
if xrange is not None:
plt.xlim(*xrange)
# Remove ticklines on y-axes
for ticks in interval_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in interval_plot.spines.items():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
# Reference line
plt.axvline(vline, color='k', linestyle='--')
    # Generate Gelman-Rubin plot
if rhat and nchains > 1:
# If there are multiple chains, calculate R-hat
rhat_plot = plt.subplot(gs[1])
if main is not False:
plt.title("R-hat")
# Set x range
plt.xlim(0.9, 2.1)
# X axis labels
plt.xticks((1.0, 1.5, 2.0), ("1", "1.5", "2+"))
plt.yticks([-(l + 1) for l in range(len(labels))], "")
i = 1
for varname in varnames:
chain = trace_obj.chains[0]
value = trace_obj.get_values(varname, chains=[chain])[0]
k = np.size(value)
if k > 1:
plt.plot([min(r, 2) for r in R[varname]], [-(j + i)
for j in range(k)], 'bo', markersize=4)
else:
plt.plot(min(R[varname], 2), -i, 'bo', markersize=4)
i += k
# Define range of y-axis
plt.ylim(-i + 0.5, -0.5)
# Remove ticklines on y-axes
for ticks in rhat_plot.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in rhat_plot.spines.items():
if loc in ['bottom', 'top']:
pass
# spine.set_position(('outward',10)) # outward by 10 points
elif loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
return gs
|
apache-2.0
|
se4u/pylearn2
|
pylearn2/cross_validation/dataset_iterators.py
|
1
|
19886
|
"""
Cross-validation dataset iterators.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
import warnings
try:
from sklearn.cross_validation import (KFold, StratifiedKFold, ShuffleSplit,
StratifiedShuffleSplit)
except ImportError:
warnings.warn("Could not import from sklearn.")
from pylearn2.compat import OrderedDict
from pylearn2.cross_validation.blocks import StackedBlocksCV
from pylearn2.cross_validation.subset_iterators import (
ValidationKFold, StratifiedValidationKFold, ValidationShuffleSplit,
StratifiedValidationShuffleSplit)
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.datasets.transformer_dataset import TransformerDataset
class DatasetCV(object):
"""
Construct a new DenseDesignMatrix for each subset.
Parameters
----------
dataset : object
Full dataset for use in cross validation.
subset_iterator : iterable
Iterable that returns (train, test) or (train, valid, test) indices
for partitioning the dataset during cross-validation.
preprocessor : Preprocessor or None
Preprocessor to apply to child datasets.
fit_preprocessor : bool
Whether preprocessor can fit parameters when applied to training
data.
which_set : str, list or None
If None, return all subset datasets. If one or more of 'train',
'valid', or 'test', return only the dataset(s) corresponding to the
given subset(s).
return_dict : bool
Whether to return subset datasets as a dictionary. If True,
returns a dict with keys 'train', 'valid', and/or 'test' (if
subset_iterator returns two subsets per partition, 'train' and
'test' are used, and if subset_iterator returns three subsets per
partition, 'train', 'valid', and 'test' are used). If False,
returns a list of datasets matching the subset order given by
subset_iterator.
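
    Examples
    --------
    A hypothetical sketch of iterating over the cross-validation datasets
    (assumes ``my_dataset`` is a ``DenseDesignMatrix`` with labels):

    .. code-block:: python

        from sklearn.cross_validation import KFold

        cv = KFold(my_dataset.get_num_examples(), n_folds=3)
        for datasets in DatasetCV(my_dataset, cv):
            train, test = datasets['train'], datasets['test']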
"""
def __init__(self, dataset, subset_iterator, preprocessor=None,
fit_preprocessor=False, which_set=None, return_dict=True):
self.dataset = dataset
self.subset_iterator = list(subset_iterator) # allow generator reuse
dataset_iterator = dataset.iterator(mode='sequential', num_batches=1,
data_specs=dataset.data_specs,
return_tuple=True)
self._data = dataset_iterator.next()
self.preprocessor = preprocessor
self.fit_preprocessor = fit_preprocessor
self.which_set = which_set
if which_set is not None:
which_set = np.atleast_1d(which_set)
assert len(which_set)
for label in which_set:
if label not in ['train', 'valid', 'test']:
raise ValueError("Unrecognized subset '{}'".format(label))
self.which_set = which_set
self.return_dict = return_dict
def get_data_subsets(self):
"""
Partition the dataset according to cross-validation subsets and
return the raw data in each subset.
"""
for subsets in self.subset_iterator:
labels = None
if len(subsets) == 3:
labels = ['train', 'valid', 'test']
elif len(subsets) == 2:
labels = ['train', 'test']
# data_subsets is an OrderedDict to maintain label order
data_subsets = OrderedDict()
for i, subset in enumerate(subsets):
subset_data = tuple(data[subset] for data in self._data)
# if len(subset_data) == 2:
# X, y = subset_data
# else:
# X, = subset_data
# y = None
# data_subsets[labels[i]] = (X, y)
data_subsets[labels[i]] = subset_data
yield data_subsets
def __iter__(self):
"""
Create a DenseDesignMatrix for each dataset subset and apply any
preprocessing to the child datasets.
"""
for data_subsets in self.get_data_subsets():
datasets = {}
for label, data in data_subsets.items():
try:
X, y = data
data_subset = DenseDesignMatrix(
X=X, y=y, X_labels=self.dataset.X_labels,
y_labels=self.dataset.y_labels)
except:
data_subset = self.dataset.__class__(
data=data, data_specs=self.dataset.data_specs)
assert isinstance(data_subset, self.dataset.__class__)
datasets[label] = data_subset
# preprocessing
if self.preprocessor is not None:
self.preprocessor.apply(datasets['train'],
can_fit=self.fit_preprocessor)
for label, dataset in datasets.items():
if label == 'train':
continue
self.preprocessor.apply(dataset, can_fit=False)
# which_set
if self.which_set is not None:
for label, dataset in list(datasets.items()):
if label not in self.which_set:
del datasets[label]
del data_subsets[label]
if not len(datasets):
raise ValueError("No matching dataset(s) for " +
"{}".format(self.which_set))
if not self.return_dict:
# data_subsets is an OrderedDict to maintain label order
datasets = list(datasets[label]
for label in data_subsets.keys())
if len(datasets) == 1:
datasets, = datasets
yield datasets
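# Usage sketch for DatasetCV: a minimal example assuming a DenseDesignMatrix
# `full_dataset` is already built; `train_model` / `evaluate_model` are
# hypothetical placeholders for the downstream training code.
#
#     from sklearn.cross_validation import KFold
#     cv = KFold(full_dataset.get_num_examples(), n_folds=5, shuffle=True)
#     for fold in DatasetCV(full_dataset, cv):
#         train_model(fold['train'])      # fit on the training subset
#         evaluate_model(fold['test'])    # score on the held-out subset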
class StratifiedDatasetCV(DatasetCV):
"""
Subclass of DatasetCV for stratified experiments, where
the relative class proportions of the full dataset are maintained in
each partition.
Parameters
----------
dataset : object
Dataset to use in cross validation.
subset_iterator : iterable
Iterable that returns train/test or train/valid/test splits for
partitioning the dataset during cross-validation.
preprocessor : Preprocessor or None
Preprocessor to apply to child datasets.
fit_preprocessor : bool
Whether preprocessor can fit parameters when applied to training
data.
which_set : str, list or None
If None, return all subset datasets. If one or more of 'train',
'valid', or 'test', return only the dataset(s) corresponding to the
given subset(s).
return_dict : bool
Whether to return subset datasets as a dictionary. If True,
returns a dict with keys 'train', 'valid', and/or 'test' (if
subset_iterator returns two subsets per partition, 'train' and
'test' are used, and if subset_iterator returns three subsets per
partition, 'train', 'valid', and 'test' are used). If False,
returns a list of datasets matching the subset order given by
subset_iterator.
"""
@staticmethod
def get_y(dataset):
"""
Stratified cross-validation requires label information for
examples. This function gets target values for a dataset,
converting from one-hot encoding to a 1D array as needed.
Parameters
----------
dataset : object
Dataset containing target values for examples.
"""
y = np.asarray(dataset.y)
if y.ndim > 1:
assert np.array_equal(np.unique(y), [0, 1])
y = np.argmax(y, axis=1)
return y
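# Sketch of what get_y does to one-hot targets (plain numpy, assuming the
# binary 0/1 encoding asserted above):
#
#     import numpy as np
#     y_one_hot = np.array([[1, 0], [0, 1], [0, 1]])
#     np.argmax(y_one_hot, axis=1)    # -> array([0, 1, 1])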
class TransformerDatasetCV(object):
"""
Cross-validation with dataset transformations. This class returns
dataset subsets after transforming them with one or more pretrained
models.
Parameters
----------
dataset_iterator : DatasetCV
Cross-validation dataset iterator providing train/test or
train/valid/test datasets.
transformers : Model or iterable
Transformer model(s) to use for transforming datasets.
"""
def __init__(self, dataset_iterator, transformers):
self.dataset_iterator = dataset_iterator
self.transformers = transformers
def __iter__(self):
"""
Construct a Transformer dataset for each partition.
"""
for k, datasets in enumerate(self.dataset_iterator):
if isinstance(self.transformers, list):
transformer = self.transformers[k]
elif isinstance(self.transformers, StackedBlocksCV):
transformer = self.transformers.select_fold(k)
else:
transformer = self.transformers
if isinstance(datasets, list):
for i, dataset in enumerate(datasets):
datasets[i] = TransformerDataset(dataset, transformer)
else:
for key, dataset in datasets.items():
datasets[key] = TransformerDataset(dataset, transformer)
yield datasets
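# Usage sketch for TransformerDatasetCV: each fold from a DatasetCV iterator is
# wrapped in a TransformerDataset built from a pretrained model. `dataset`,
# `cv_indices` and `pretrained_model` are assumed to exist; a list of models or
# a StackedBlocksCV (one transformer per fold) would also be accepted.
#
#     base_iter = DatasetCV(dataset, cv_indices)
#     for fold in TransformerDatasetCV(base_iter, pretrained_model):
#         fold['train']    # TransformerDataset wrapping the raw training subset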
class DatasetKFold(DatasetCV):
"""
K-fold cross-validation.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_folds : int
Number of cross-validation folds.
shuffle : bool
Whether to shuffle the dataset before partitioning.
random_state : int or RandomState
Random number generator used for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
**kwargs):
n = dataset.get_num_examples()
cv = KFold(n, n_folds=n_folds, shuffle=shuffle,
random_state=random_state)
super(DatasetKFold, self).__init__(dataset, cv, **kwargs)
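# Usage sketch: the convenience subclasses build the index iterator internally,
# so 5-fold cross-validation over an assumed DenseDesignMatrix `dataset` is just:
#
#     for fold in DatasetKFold(dataset, n_folds=5, shuffle=True, random_state=0):
#         fold['train'], fold['test']    # one DenseDesignMatrix per subset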
class StratifiedDatasetKFold(StratifiedDatasetCV):
"""
Stratified K-fold cross-validation.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_folds : int
Number of cross-validation folds.
shuffle : bool
Whether to shuffle the dataset before partitioning.
random_state : int or RandomState
Random number generator used for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
**kwargs):
y = self.get_y(dataset)
try:
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=shuffle,
random_state=random_state)
except TypeError:
assert not shuffle and not random_state, (
"The 'shuffle' and 'random_state' arguments are not " +
"supported by this version of sklearn. See "
"http://scikit-learn.org/stable/developers/index.html" +
"#git-repo for details on installing the development version.")
cv = StratifiedKFold(y, n_folds=n_folds)
super(StratifiedDatasetKFold, self).__init__(dataset, cv, **kwargs)
class DatasetShuffleSplit(DatasetCV):
"""
Shuffle-split cross-validation.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_iter : int
Number of shuffle-split iterations.
test_size : float, int, or None
        If float, interpreted as the proportion of examples in the test set.
If int, interpreted as the absolute number of examples in the test
set. If None, adjusted to the complement of train_size.
train_size : float, int, or None
        If float, interpreted as the proportion of examples in the training
set. If int, interpreted as the absolute number of examples in the
training set. If None, adjusted to the complement of test_size.
random_state : int or RandomState
Random number generator used for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None,
random_state=None, **kwargs):
n = dataset.X.shape[0]
cv = ShuffleSplit(n, n_iter=n_iter, test_size=test_size,
train_size=train_size, random_state=random_state)
super(DatasetShuffleSplit, self).__init__(dataset, cv, **kwargs)
class StratifiedDatasetShuffleSplit(StratifiedDatasetCV):
"""
Stratified shuffle-split cross-validation.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_iter : int
Number of shuffle-split iterations.
test_size : float, int, or None
        If float, interpreted as the proportion of examples in the test set.
If int, interpreted as the absolute number of examples in the test
set. If None, adjusted to the complement of train_size.
train_size : float, int, or None
        If float, interpreted as the proportion of examples in the training
set. If int, interpreted as the absolute number of examples in the
training set. If None, adjusted to the complement of test_size.
random_state : int or RandomState
Random number generator used for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None,
random_state=None, **kwargs):
y = self.get_y(dataset)
cv = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size,
train_size=train_size,
random_state=random_state)
super(StratifiedDatasetShuffleSplit, self).__init__(dataset, cv,
**kwargs)
class DatasetValidationKFold(DatasetCV):
"""
K-fold cross-validation with train/valid/test subsets.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_folds : int
Number of cross-validation folds. Must be at least 3.
shuffle : bool
Whether to shuffle the data before splitting.
random_state : int, RandomState, or None
Pseudorandom number seed or generator to use for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
**kwargs):
n = dataset.get_num_examples()
cv = ValidationKFold(n, n_folds, shuffle, random_state)
super(DatasetValidationKFold, self).__init__(dataset, cv, **kwargs)
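# Usage sketch: the *Validation* variants yield three subsets per fold, so a
# validation set is available for early stopping or model selection. `dataset`
# is an assumed DenseDesignMatrix.
#
#     for fold in DatasetValidationKFold(dataset, n_folds=4):
#         fold['train'], fold['valid'], fold['test']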
class StratifiedDatasetValidationKFold(StratifiedDatasetCV):
"""
Stratified K-fold cross-validation with train/valid/test subsets.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_folds : int
Number of cross-validation folds. Must be at least 3.
shuffle : bool
Whether to shuffle the data before splitting.
random_state : int, RandomState, or None
Pseudorandom number seed or generator to use for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
**kwargs):
y = self.get_y(dataset)
cv = StratifiedValidationKFold(y, n_folds, shuffle, random_state)
super(StratifiedDatasetValidationKFold, self).__init__(dataset, cv,
**kwargs)
class DatasetValidationShuffleSplit(DatasetCV):
"""
Shuffle-split cross-validation with train/valid/test subsets.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_iter : int
Number of shuffle/split iterations.
test_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
        proportion of the entire dataset to include in the test
        split. If int, represents the absolute number of test
samples. If None, the value is automatically set to the complement
of train_size + valid_size.
valid_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the entire dataset to include in the validation
split. If int, represents the absolute number of validation
samples. If None, the value is automatically set to match
test_size.
train_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
        proportion of the entire dataset to include in the training
        split. If int, represents the absolute number of training
samples. If None, the value is automatically set to the complement
of valid_size + test_size.
random_state : int, RandomState, or None
Pseudorandom number seed or generator to use for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None,
train_size=None, random_state=None, **kwargs):
n = dataset.get_num_examples()
cv = ValidationShuffleSplit(n, n_iter, test_size, valid_size,
train_size, random_state)
super(DatasetValidationShuffleSplit, self).__init__(dataset, cv,
**kwargs)
class StratifiedDatasetValidationShuffleSplit(StratifiedDatasetCV):
"""
Stratified shuffle-split cross-validation with train/valid/test
subsets.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_iter : int
Number of shuffle/split iterations.
test_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
        proportion of the entire dataset to include in the test
        split. If int, represents the absolute number of test
samples. If None, the value is automatically set to the complement
of train_size + valid_size.
valid_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the entire dataset to include in the validation
split. If int, represents the absolute number of validation
samples. If None, the value is automatically set to match
test_size.
train_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
        proportion of the entire dataset to include in the training
        split. If int, represents the absolute number of training
samples. If None, the value is automatically set to the complement
of valid_size + test_size.
random_state : int, RandomState, or None
Pseudorandom number seed or generator to use for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None,
train_size=None, random_state=None, **kwargs):
y = self.get_y(dataset)
cv = StratifiedValidationShuffleSplit(y, n_iter, test_size, valid_size,
train_size, random_state)
super(StratifiedDatasetValidationShuffleSplit, self).__init__(dataset,
cv,
**kwargs)
|
bsd-3-clause
|
ishank08/scikit-learn
|
sklearn/datasets/lfw.py
|
15
|
18695
|
"""Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification, is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove, rename
from os.path import join, exists, isdir
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
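# Usage sketch: after mean/variance normalization a face image can leave the
# [0, 1] range, so rescale it before plotting. `faces` is an assumed array of
# images such as the ones returned by the loaders below.
#
#     import matplotlib.pyplot as plt
#     plt.imshow(scale_face(faces[0]), cmap=plt.cm.gray)
#     plt.show()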
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
archive_path_temp = archive_path + ".tmp"
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path_temp)
rename(archive_path_temp, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with at least
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid picking up statistical
        correlations from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid picking up statistical
        correlations from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
        Labels associated to each pair of images. The two label values
        correspond to different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
|
bsd-3-clause
|
davidwaroquiers/pymatgen
|
pymatgen/analysis/interface.py
|
4
|
47432
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to store, generate, and manipulate material interfaces.
"""
import warnings
from itertools import product
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.substrate_analyzer import SubstrateAnalyzer, reduce_vectors
from pymatgen.core.operations import SymmOp
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.surface import Slab, SlabGenerator
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
__author__ = "Eric Sivonxay, Shyam Dwaraknath, and Kyle Bystrom"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Kyle Bystrom"
__email__ = "[email protected]"
__date__ = "5/29/2019"
__status__ = "Prototype"
class Interface(Structure):
"""
This class stores data for defining an interface between two structures.
It is a subclass of pymatgen.core.structure.Structure.
"""
def __init__(
self,
lattice,
species,
coords,
sub_plane,
film_plane,
sub_init_cell,
film_init_cell,
modified_sub_structure,
modified_film_structure,
strained_sub_structure,
strained_film_structure,
validate_proximity=False,
coords_are_cartesian=False,
init_inplane_shift=None,
charge=None,
site_properties=None,
to_unit_cell=False,
):
"""
Makes an interface structure, a Structure object with additional
information and methods pertaining to interfaces.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Species]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / species specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Species objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
sub_plane (list): Substrate plane in the form of a list of integers
(based on the sub_init_cell), e.g.: [1, 2, 3].
film_plane (list): Film plane in the form of a list of integers
(based on the film_init_cell), e.g. [1, 2, 3].
sub_init_cell (Structure): initial bulk substrate structure
film_init_cell (Structure): initial bulk film structure
site_properties (dict): Properties associated with the sites as a
dict of sequences. The sequences have to be the same length as
the atomic species and fractional_coords. For an interface, you should
have the 'interface_label' properties to classify the sites as
'substrate' and 'film'.
modified_sub_structure (Slab): substrate supercell slab.
modified_film_structure (Slab): film supercell slab.
strained_sub_structure (Slab): strained substrate supercell slab
strained_film_structure (Slab): strained film supercell slab
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
init_inplane_shift (length-2 list of float, in Cartesian coordinates):
The initial shift of the film relative to the substrate
in the plane of the interface.
            charge (float, optional): overall charge of the structure
"""
super().__init__(
lattice,
species,
coords,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties,
charge=charge,
)
self.modified_sub_structure = modified_sub_structure
self.modified_film_structure = modified_film_structure
self.strained_sub_structure = strained_sub_structure
self.strained_film_structure = strained_film_structure
self.sub_plane = sub_plane
self.film_plane = film_plane
self.sub_init_cell = sub_init_cell
self.film_init_cell = film_init_cell
z_shift = np.min(self.film.cart_coords[:, 2]) - np.max(self.substrate.cart_coords[:, 2])
if init_inplane_shift is None:
init_inplane_shift = np.array([0.0, 0.0])
self._offset_vector = np.append(init_inplane_shift, [z_shift])
def shift_film_along_surface_lattice(self, da, db):
"""
Given two floats da and db, adjust the shift vector
by da * (first lattice vector) + db * (second lattice vector).
This shift is in the plane of the interface.
I.e. da and db are fractional coordinates.
Args:
da (float): shift in the first lattice vector
db (float): shift in the second lattice vector
"""
self.shift_film(da * self.lattice.matrix[0] + db * self.lattice.matrix[1])
def change_z_shift(self, dz):
"""
Adjust the spacing between the substrate and film layers by dz Angstroms
Args:
dz (float): shift perpendicular to the plane (in Angstroms)
"""
self.shift_film(np.array([0.0, 0.0, dz]))
def shift_film(self, delta):
"""
Shift the film's position relative to the substrate.
Args:
delta (length-3 list of float or numpy array): Cartesian coordinate
vector by which to shift the film. After this operation
self.offset_vector -> self.offset_vector + delta.
"""
if self.offset_vector[2] + delta[2] < 0 or delta[2] > self.vacuum_thickness:
raise ValueError("The shift {} will collide the film and substrate.".format(delta))
self._offset_vector += np.array(delta)
self.translate_sites(self.get_film_indices(), delta, frac_coords=False, to_unit_cell=True)
@property
def offset_vector(self):
"""
Displacement of the origin of the film structure relative to that
of the substrate structure in Cartesian coordinates.
"""
return self._offset_vector.copy()
@offset_vector.setter
def offset_vector(self, offset_vector):
delta = offset_vector - self._offset_vector
self.shift_film(delta)
@property
def ab_shift(self):
"""
The 2D component of offset_vector along the interface plane
in fractional coordinates. I.e. if ab_shift = [a, b], the
Cartesian coordinate shift in the interface plane
is a * (first lattice vector) + b * (second lattice vector).
"""
return np.dot(self.offset_vector, np.linalg.inv(self.lattice.matrix))[:2]
@ab_shift.setter
def ab_shift(self, ab_shift):
delta = ab_shift - self.ab_shift
self.shift_film_along_surface_lattice(delta[0], delta[1])
@property
def z_shift(self):
"""
        The component of offset_vector perpendicular to the interface
        plane, in Angstroms. I.e. if z_shift = z, the distance between
        the substrate and film planes is z.
"""
return self.offset_vector[2]
@z_shift.setter
def z_shift(self, z_shift):
delta = z_shift - self.z_shift
self.change_z_shift(delta)
@property
def vacuum_thickness(self):
"""
Vacuum buffer above the film.
"""
return np.min(self.substrate.cart_coords[:, 2]) + self.lattice.c - np.max(self.film.cart_coords[:, 2])
@property
def substrate_sites(self):
"""
Return the substrate sites of the interface.
"""
sub_sites = []
for i, tag in enumerate(self.site_properties["interface_label"]):
if "substrate" in tag:
sub_sites.append(self.sites[i])
return sub_sites
@property
def substrate(self):
"""
Return the substrate (Structure) of the interface.
"""
return Structure.from_sites(self.substrate_sites)
def get_film_indices(self):
"""
Retrieve the indices of the film sites
"""
film_sites = []
for i, tag in enumerate(self.site_properties["interface_label"]):
if "film" in tag:
film_sites.append(i)
return film_sites
@property
def film_sites(self):
"""
Return the film sites of the interface.
"""
film_sites = []
for i, tag in enumerate(self.site_properties["interface_label"]):
if "film" in tag:
film_sites.append(self.sites[i])
return film_sites
@property
def film(self):
"""
Return the film (Structure) of the interface.
"""
return Structure.from_sites(self.film_sites)
def copy(self, site_properties=None):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Returns:
A copy of the Interface.
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
return Interface(
self.lattice,
self.species_and_occu,
self.frac_coords,
self.sub_plane,
self.film_plane,
self.sub_init_cell,
self.film_init_cell,
self.modified_sub_structure,
self.modified_film_structure,
self.strained_sub_structure,
self.strained_film_structure,
validate_proximity=False,
coords_are_cartesian=False,
init_inplane_shift=self.offset_vector[:2],
charge=self.charge,
site_properties=self.site_properties,
)
def get_sorted_structure(self, key=None, reverse=False):
"""
Get a sorted copy of the structure. The parameters have the same
meaning as in list.sort. By default, sites are sorted by the
electronegativity of the species.
Args:
key: Specifies a function of one argument that is used to extract
a comparison key from each list element: key=str.lower. The
default value is None (compare the elements directly).
reverse (bool): If set to True, then the list elements are sorted
as if each comparison were reversed.
"""
struct_copy = self.copy()
struct_copy.sort(key=key, reverse=reverse)
return struct_copy
def as_dict(self):
"""
:return: MSONable dict
"""
d = super().as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["sub_plane"] = self.sub_plane
d["film_plane"] = self.film_plane
d["sub_init_cell"] = self.sub_init_cell
d["film_init_cell"] = self.film_init_cell
d["modified_sub_structure"] = self.modified_sub_structure
d["modified_film_structure"] = self.modified_film_structure
d["strained_sub_structure"] = self.strained_sub_structure
d["strained_film_structure"] = self.strained_film_structure
d["init_inplane_shift"] = self.offset_vector[0:2]
return d
@classmethod
def from_dict(cls, d):
"""
:param d: Dict representation
:return: Interface
"""
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
s = Structure.from_sites(sites)
return Interface(
lattice=lattice,
species=s.species_and_occu,
coords=s.frac_coords,
sub_plane=d["sub_plane"],
film_plane=d["film_plane"],
sub_init_cell=d["sub_init_cell"],
film_init_cell=d["film_init_cell"],
modified_sub_structure=d["modified_sub_structure"],
modified_film_structure=d["modified_film_structure"],
strained_sub_structure=d["strained_sub_structure"],
strained_film_structure=d["strained_film_structure"],
site_properties=s.site_properties,
init_inplane_shift=d["init_inplane_shift"],
)
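# Usage sketch: once an Interface has been built (e.g. by the InterfaceBuilder
# below), the film can be shifted rigidly relative to the substrate through the
# offset properties; `interface` is an assumed Interface instance.
#
#     interface.ab_shift = [0.5, 0.5]    # in-plane shift in fractional coords
#     interface.z_shift += 0.2           # widen the interfacial gap by 0.2 Angstrom
#     interface.to(fmt="poscar", filename="shifted_interface_POSCAR")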
class InterfaceBuilder:
"""
This class constructs the epitaxially matched interfaces between two crystalline slabs
"""
def __init__(self, substrate_structure, film_structure):
"""
Args:
substrate_structure (Structure): structure of substrate
film_structure (Structure): structure of film
"""
# Bulk structures
self.original_substrate_structure = substrate_structure
self.original_film_structure = film_structure
self.matches = []
self.match_index = None
# SlabGenerator objects for the substrate and film
self.sub_sg = None
self.substrate_layers = None
self.film_sg = None
self.film_layers = None
# Structures with no vacuum
self.substrate_structures = []
self.film_structures = []
# "slab" structure (with no vacuum) oriented with a direction along x-axis and ab plane normal aligned with
# z-axis
self.oriented_substrate = None
self.oriented_film = None
# Strained structures with no vacuum
self.strained_substrate = None
self.strained_film = None
# Substrate with transformation/matches applied
self.modified_substrate_structures = []
self.modified_film_structures = []
        # Non-stoichiometric slabs with symmetric surfaces, as generated by pymatgen. Note: tests suggest these
        # are highly unreliable, so check them before use.
self.sym_modified_substrate_structures = []
self.sym_modified_film_structures = []
# Interface structures
self.interfaces = []
self.interface_labels = []
def get_summary_dict(self):
"""
Return dictionary with information about the InterfaceBuilder,
with currently generated structures included.
"""
d = {"match": self.matches[0]}
d["substrate_layers"] = self.substrate_layers
d["film_layers"] = self.film_layers
d["bulk_substrate"] = self.original_substrate_structure
d["bulk_film"] = self.original_film_structure
d["strained_substrate"] = self.strained_substrate
d["strained_film"] = self.strained_film
d["slab_substrates"] = self.modified_substrate_structures
d["slab_films"] = self.modified_film_structures
d["interfaces"] = self.interfaces
d["interface_labels"] = self.interface_labels
return d
def write_all_structures(self):
"""
Write all of the structures relevant for
the interface calculation to VASP POSCAR files.
"""
_poscar = Poscar(self.original_substrate_structure)
_poscar.write_file("bulk_substrate_POSCAR")
_poscar = Poscar(self.original_film_structure)
_poscar.write_file("bulk_film_POSCAR")
_poscar = Poscar(self.strained_substrate)
_poscar.write_file("strained_substrate_POSCAR")
_poscar = Poscar(self.strained_film)
_poscar.write_file("strained_film_POSCAR")
for i, interface in enumerate(self.modified_substrate_structures):
_poscar = Poscar(interface)
_poscar.write_file("slab_substrate_%d_POSCAR" % i)
for i, interface in enumerate(self.modified_film_structures):
_poscar = Poscar(interface)
_poscar.write_file("slab_film_%d_POSCAR" % i)
for i, interface in enumerate(self.film_structures):
_poscar = Poscar(interface)
_poscar.write_file("slab_unit_film_%d_POSCAR" % i)
for label, interface in zip(self.interface_labels, self.interfaces):
_poscar = Poscar(interface)
_poscar.write_file("interface_%s_POSCAR" % label.replace("/", "-"))
def generate_interfaces(
self, film_millers=None, substrate_millers=None, film_layers=3, substrate_layers=3, **kwargs
):
"""
Generate a list of Interface (Structure) objects and store them to self.interfaces.
Args:
film_millers (list of [int]): list of film surfaces
substrate_millers (list of [int]): list of substrate surfaces
film_layers (int): number of layers of film to include in Interface structures.
substrate_layers (int): number of layers of substrate to include in Interface structures.
"""
self.get_oriented_slabs(
lowest=True,
film_millers=film_millers,
substrate_millers=substrate_millers,
film_layers=film_layers,
substrate_layers=substrate_layers,
)
self.combine_slabs(**kwargs)
def get_oriented_slabs(self, film_layers=3, substrate_layers=3, match_index=0, **kwargs):
"""
Get a list of oriented slabs for constructing interfaces and put them
in self.film_structures, self.substrate_structures, self.modified_film_structures,
and self.modified_substrate_structures.
        The match used is selected by match_index (default 0, the first match in the list).
Args:
film_layers (int): number of layers of film to include in Interface structures.
substrate_layers (int): number of layers of substrate to include in Interface structures.
match_index (int): ZSL match from which to construct slabs.
"""
self.match_index = match_index
self.substrate_layers = substrate_layers
self.film_layers = film_layers
if "zslgen" in kwargs.keys():
sa = SubstrateAnalyzer(zslgen=kwargs.get("zslgen"))
del kwargs["zslgen"]
else:
sa = SubstrateAnalyzer()
# Generate all possible interface matches
self.matches = list(sa.calculate(self.original_film_structure, self.original_substrate_structure, **kwargs))
match = self.matches[match_index]
# Generate substrate slab and align x axis to (100) and slab normal to (001)
# Get no-vacuum structure for strained bulk calculation
self.sub_sg = SlabGenerator(
self.original_substrate_structure,
match["sub_miller"],
substrate_layers,
0,
in_unit_planes=True,
reorient_lattice=False,
primitive=False,
)
no_vac_sub_slab = self.sub_sg.get_slab()
no_vac_sub_slab = get_shear_reduced_slab(no_vac_sub_slab)
self.oriented_substrate = align_x(no_vac_sub_slab)
self.oriented_substrate.sort()
# Get slab with vacuum
self.sub_sg = SlabGenerator(
self.original_substrate_structure,
match["sub_miller"],
substrate_layers,
1,
in_unit_planes=True,
reorient_lattice=False,
primitive=False,
)
sub_slabs = self.sub_sg.get_slabs()
for i, sub_slab in enumerate(sub_slabs):
sub_slab = get_shear_reduced_slab(sub_slab)
sub_slab = align_x(sub_slab)
sub_slab.sort()
sub_slabs[i] = sub_slab
self.substrate_structures = sub_slabs
# Generate film slab and align x axis to (100) and slab normal to (001)
# Get no-vacuum structure for strained bulk calculation
self.film_sg = SlabGenerator(
self.original_film_structure,
match["film_miller"],
film_layers,
0,
in_unit_planes=True,
reorient_lattice=False,
primitive=False,
)
no_vac_film_slab = self.film_sg.get_slab()
no_vac_film_slab = get_shear_reduced_slab(no_vac_film_slab)
self.oriented_film = align_x(no_vac_film_slab)
self.oriented_film.sort()
# Get slab with vacuum
self.film_sg = SlabGenerator(
self.original_film_structure,
match["film_miller"],
film_layers,
1,
in_unit_planes=True,
reorient_lattice=False,
primitive=False,
)
film_slabs = self.film_sg.get_slabs()
for i, film_slab in enumerate(film_slabs):
film_slab = get_shear_reduced_slab(film_slab)
film_slab = align_x(film_slab)
film_slab.sort()
film_slabs[i] = film_slab
self.film_structures = film_slabs
# Apply transformation to produce matched area and a & b vectors
self.apply_transformations(match)
        # Get non-stoichiometric substrate slabs
sym_sub_slabs = []
for sub_slab in self.modified_substrate_structures:
sym_sub_slab = self.sub_sg.nonstoichiometric_symmetrized_slab(sub_slab)
for slab in sym_sub_slab:
if not slab == sub_slab:
sym_sub_slabs.append(slab)
self.sym_modified_substrate_structures = sym_sub_slabs
        # Get non-stoichiometric film slabs
sym_film_slabs = []
for film_slab in self.modified_film_structures:
sym_film_slab = self.film_sg.nonstoichiometric_symmetrized_slab(film_slab)
for slab in sym_film_slab:
if not slab == film_slab:
sym_film_slabs.append(slab)
self.sym_modified_film_structures = sym_film_slabs
        # Strained substrate and film structures (no vacuum)
self.strained_substrate, self.strained_film = strain_slabs(self.oriented_substrate, self.oriented_film)
@staticmethod
def apply_transformation(structure, matrix):
"""
Make a supercell of structure using matrix
Args:
structure (Slab): Slab to make supercell of
matrix (3x3 np.ndarray): supercell matrix
Returns:
(Slab) The supercell of structure
"""
modified_substrate_structure = structure.copy()
# Apply scaling
modified_substrate_structure.make_supercell(matrix)
# Reduce vectors
new_lattice = modified_substrate_structure.lattice.matrix.copy()
new_lattice[:2, :] = reduce_vectors(*modified_substrate_structure.lattice.matrix[:2, :])
modified_substrate_structure = Slab(
lattice=Lattice(new_lattice),
species=modified_substrate_structure.species,
coords=modified_substrate_structure.cart_coords,
miller_index=modified_substrate_structure.miller_index,
oriented_unit_cell=modified_substrate_structure.oriented_unit_cell,
shift=modified_substrate_structure.shift,
scale_factor=modified_substrate_structure.scale_factor,
coords_are_cartesian=True,
energy=modified_substrate_structure.energy,
reorient_lattice=modified_substrate_structure.reorient_lattice,
to_unit_cell=True,
)
return modified_substrate_structure
def apply_transformations(self, match):
"""
Using ZSL match, transform all of the film_structures by the ZSL
supercell transformation.
Args:
match (dict): ZSL match returned by ZSLGenerator.__call__
"""
film_transformation = match["film_transformation"]
sub_transformation = match["substrate_transformation"]
modified_substrate_structures = [struct.copy() for struct in self.substrate_structures]
modified_film_structures = [struct.copy() for struct in self.film_structures]
# Match angles in lattices with 𝛾=θ° and 𝛾=(180-θ)°
if np.isclose(
180 - modified_film_structures[0].lattice.gamma,
modified_substrate_structures[0].lattice.gamma,
atol=3,
):
reflection = SymmOp.from_rotation_and_translation(((-1, 0, 0), (0, 1, 0), (0, 0, 1)), (0, 0, 1))
for modified_film_structure in modified_film_structures:
modified_film_structure.apply_operation(reflection, fractional=True)
self.oriented_film.apply_operation(reflection, fractional=True)
sub_scaling = np.diag(np.diag(sub_transformation))
# Turn into 3x3 Arrays
sub_scaling = np.diag(np.append(np.diag(sub_scaling), 1))
temp_matrix = np.diag([1, 1, 1])
temp_matrix[:2, :2] = sub_transformation
for modified_substrate_structure in modified_substrate_structures:
modified_substrate_structure = self.apply_transformation(modified_substrate_structure, temp_matrix)
self.modified_substrate_structures.append(modified_substrate_structure)
self.oriented_substrate = self.apply_transformation(self.oriented_substrate, temp_matrix)
film_scaling = np.diag(np.diag(film_transformation))
# Turn into 3x3 Arrays
film_scaling = np.diag(np.append(np.diag(film_scaling), 1))
temp_matrix = np.diag([1, 1, 1])
temp_matrix[:2, :2] = film_transformation
for modified_film_structure in modified_film_structures:
modified_film_structure = self.apply_transformation(modified_film_structure, temp_matrix)
self.modified_film_structures.append(modified_film_structure)
self.oriented_film = self.apply_transformation(self.oriented_film, temp_matrix)
def combine_slabs(self):
"""
Combine the slabs generated by get_oriented_slabs into interfaces
"""
all_substrate_variants = []
sub_labels = []
for i, slab in enumerate(self.modified_substrate_structures):
all_substrate_variants.append(slab)
sub_labels.append(str(i))
sg = SpacegroupAnalyzer(slab, symprec=1e-3)
if not sg.is_laue():
mirrored_slab = slab.copy()
reflection_z = SymmOp.from_rotation_and_translation(((1, 0, 0), (0, 1, 0), (0, 0, -1)), (0, 0, 0))
mirrored_slab.apply_operation(reflection_z, fractional=True)
translation = [0, 0, -min(mirrored_slab.frac_coords[:, 2])]
mirrored_slab.translate_sites(range(mirrored_slab.num_sites), translation)
all_substrate_variants.append(mirrored_slab)
sub_labels.append("%dm" % i)
all_film_variants = []
film_labels = []
for i, slab in enumerate(self.modified_film_structures):
all_film_variants.append(slab)
film_labels.append(str(i))
sg = SpacegroupAnalyzer(slab, symprec=1e-3)
if not sg.is_laue():
mirrored_slab = slab.copy()
reflection_z = SymmOp.from_rotation_and_translation(((1, 0, 0), (0, 1, 0), (0, 0, -1)), (0, 0, 0))
mirrored_slab.apply_operation(reflection_z, fractional=True)
translation = [0, 0, -min(mirrored_slab.frac_coords[:, 2])]
mirrored_slab.translate_sites(range(mirrored_slab.num_sites), translation)
all_film_variants.append(mirrored_slab)
film_labels.append("%dm" % i)
# substrate first index, film second index
self.interfaces = []
self.interface_labels = []
# self.interfaces = [[None for j in range(len(all_film_variants))] for i in range(len(all_substrate_variants))]
for i, substrate in enumerate(all_substrate_variants):
for j, film in enumerate(all_film_variants):
self.interfaces.append(self.make_interface(substrate, film))
self.interface_labels.append("%s/%s" % (film_labels[j], sub_labels[i]))
def make_interface(self, slab_substrate, slab_film, offset=None):
"""
Strain a film to fit a substrate and generate an interface.
Args:
slab_substrate (Slab): substrate structure supercell
slab_film (Slab): film structure supercell
offset ([int]): separation vector of film and substrate
"""
# Check if lattices are equal. If not, strain them to match
# NOTE: CHANGED THIS TO MAKE COPY OF SUBSTRATE/FILM, self.modified_film_structures NO LONGER STRAINED
unstrained_slab_substrate = slab_substrate.copy()
slab_substrate = slab_substrate.copy()
unstrained_slab_film = slab_film.copy()
slab_film = slab_film.copy()
latt_1 = slab_substrate.lattice.matrix.copy()
latt_1[2, :] = [0, 0, 1]
latt_2 = slab_film.lattice.matrix.copy()
latt_2[2, :] = [0, 0, 1]
if Lattice(latt_1) != Lattice(latt_2):
# Calculate lattice strained to match:
matched_slab_substrate, matched_slab_film = strain_slabs(slab_substrate, slab_film)
else:
matched_slab_substrate = slab_substrate
matched_slab_film = slab_film
# Ensure substrate has positive c-direction:
if matched_slab_substrate.lattice.matrix[2, 2] < 0:
latt = matched_slab_substrate.lattice.matrix.copy()
latt[2, 2] *= -1
new_struct = matched_slab_substrate.copy()
new_struct.lattice = Lattice(latt)
matched_slab_substrate = new_struct
# Ensure film has positive c-direction:
if matched_slab_film.lattice.matrix[2, 2] < 0:
latt = matched_slab_film.lattice.matrix.copy()
latt[2, 2] *= -1
new_struct = matched_slab_film.copy()
new_struct.lattice = Lattice(latt)
matched_slab_film = new_struct
if offset is None:
offset = (2.5, 0.0, 0.0)
_structure = merge_slabs(matched_slab_substrate, matched_slab_film, *offset)
orthogonal_structure = _structure.get_orthogonal_c_slab()
orthogonal_structure.sort()
if not orthogonal_structure.is_valid(tol=1):
warnings.warn("Check generated structure, it may contain atoms too closely placed")
# offset_vector = (offset[1], offset[2], offset[0])
interface = Interface(
orthogonal_structure.lattice.copy(),
orthogonal_structure.species,
orthogonal_structure.frac_coords,
slab_substrate.miller_index,
slab_film.miller_index,
self.original_substrate_structure,
self.original_film_structure,
unstrained_slab_substrate,
unstrained_slab_film,
slab_substrate,
slab_film,
init_inplane_shift=offset[1:],
site_properties=orthogonal_structure.site_properties,
)
return interface
def visualize_interface(self, interface_index=0, show_atoms=False, n_uc=2):
"""
Plot the film-substrate superlattice match, the film superlattice,
and the substrate superlattice in three separate plots and show them.
Args:
interface_index (int, 0): Choice of interface to plot
show_atoms (bool, False): Whether to plot atomic sites
n_uc (int, 2): Number of 2D unit cells of the interface in each direction.
                (The unit cell of the interface is the supercell of the substrate
                that matches a supercell of the film.)
"""
film_index = int(self.interface_labels[interface_index][0])
sub_index = int(self.interface_labels[interface_index][2])
visualize_interface(self.interfaces[interface_index], show_atoms, n_uc)
visualize_superlattice(
self.film_structures[film_index],
self.modified_film_structures[film_index],
film=True,
show_atoms=show_atoms,
n_uc=n_uc,
)
visualize_superlattice(
self.substrate_structures[sub_index],
self.modified_substrate_structures[sub_index],
film=False,
show_atoms=show_atoms,
n_uc=n_uc,
)
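# Usage sketch: build epitaxially matched interfaces from two bulk structures
# read from file (the file names are hypothetical); the generated Interface
# objects are collected on `builder.interfaces`.
#
#     substrate = Structure.from_file("POSCAR_substrate")
#     film = Structure.from_file("POSCAR_film")
#     builder = InterfaceBuilder(substrate, film)
#     builder.generate_interfaces(film_layers=3, substrate_layers=3)
#     builder.visualize_interface(interface_index=0, show_atoms=True)
#     first_interface = builder.interfaces[0]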
def visualize_interface(interface, show_atoms=False, n_uc=2):
"""
Plot the match of the substrate and film superlattices.
Args:
interface (Interface): Interface object
show_atoms (bool, False): Whether to plot atomic sites
n_uc (int, 2): Number of 2D unit cells of the interface in each direction.
            (The unit cell of the interface is the supercell of the substrate
            that matches a supercell of the film.)
"""
# sub_struct = interface.sub_init_cell
# film_struct = interface.film_init_cell
modified_sub_struct = interface.modified_sub_structure
modified_film_struct = interface.modified_film_structure
rotated_modified_film_structure = align_x(modified_film_struct.copy(), get_ortho_axes(modified_sub_struct))
# Show super lattice matches
plt.figure(dpi=150)
legend_elements = []
for i, j in product(range(-n_uc, n_uc), range(-n_uc, n_uc)):
v1 = modified_sub_struct.lattice.matrix[0, :]
v2 = modified_sub_struct.lattice.matrix[1, :]
current_start = v1 * i + v2 * j
plt.plot(
[current_start[0], current_start[0] + v1[0]],
[current_start[1], current_start[1] + v1[1]],
"-k",
linewidth=0.3,
)
plt.plot(
[current_start[0], current_start[0] + v2[0]],
[current_start[1], current_start[1] + v2[1]],
"-k",
linewidth=0.3,
)
if show_atoms:
plt.plot(
np.add(modified_sub_struct.cart_coords[:, 0], current_start[0]),
np.add(modified_sub_struct.cart_coords[:, 1], current_start[1]),
"or",
markersize=0.1,
)
legend_elements.append(Line2D([0], [0], color="k", lw=1, label="Substrate Superlattice"))
if show_atoms:
legend_elements.append(
Line2D(
[0],
[0],
marker="o",
color="w",
lw=1,
label="Substrate atoms",
markerfacecolor="r",
markersize=3,
)
)
for i, j in product(range(-n_uc, n_uc), range(-n_uc, n_uc)):
v1 = rotated_modified_film_structure.lattice.matrix[0, :]
v2 = rotated_modified_film_structure.lattice.matrix[1, :]
current_start = v1 * i + v2 * j
plt.plot(
[current_start[0], current_start[0] + v1[0]],
[current_start[1], current_start[1] + v1[1]],
"-b",
linewidth=0.3,
)
plt.plot(
[current_start[0], current_start[0] + v2[0]],
[current_start[1], current_start[1] + v2[1]],
"-b",
linewidth=0.3,
)
if show_atoms:
plt.plot(
np.add(rotated_modified_film_structure.cart_coords[:, 0], current_start[0]),
np.add(rotated_modified_film_structure.cart_coords[:, 1], current_start[1]),
"og",
markersize=0.1,
)
legend_elements.append(Line2D([0], [0], color="b", lw=1, label="Film Superlattice"))
if show_atoms:
legend_elements.append(
Line2D(
[0],
[0],
marker="o",
color="w",
lw=1,
label="Film atoms",
markerfacecolor="g",
markersize=3,
)
)
plt.axis("scaled")
plt.title("Superlattice Match")
plt.legend(handles=legend_elements)
plt.show()
def visualize_superlattice(struct, modified_struct, film=True, show_atoms=False, n_uc=2):
"""
Visualize the unit cell-supercell match for either the film or substrate
(specified by film boolean tag).
Args:
struct (Slab): unit cell slab
modified_struct (Slab): supercell slab
film (bool, True): True=label plot as film, False=label plot as substrate
show_atoms (bool, False): Whether to plot atomic sites
n_uc (int, 2): Number of 2D unit cells of the interface in each direction.
            (The unit cell of the interface is the supercell of the substrate
            that matches a supercell of the film.)
"""
label = "Film" if film else "Substrate"
plt.figure(dpi=150)
legend_elements = []
for i, j in product(range(-n_uc, n_uc), range(-n_uc, n_uc)):
v1 = modified_struct.lattice.matrix[0, :]
v2 = modified_struct.lattice.matrix[1, :]
current_start = v1 * i + v2 * j
plt.plot(
[current_start[0], current_start[0] + v1[0]],
[current_start[1], current_start[1] + v1[1]],
"-k",
linewidth=0.3,
)
plt.plot(
[current_start[0], current_start[0] + v2[0]],
[current_start[1], current_start[1] + v2[1]],
"-k",
linewidth=0.3,
)
if show_atoms:
plt.plot(
np.add(modified_struct.cart_coords[:, 0], current_start[0]),
np.add(modified_struct.cart_coords[:, 1], current_start[1]),
"or",
markersize=0.1,
)
legend_elements.append(Line2D([0], [0], color="k", lw=1, label="%s Superlattice" % label))
if show_atoms:
legend_elements.append(
Line2D(
[0],
[0],
marker="o",
color="w",
lw=1,
label="%s Superlattice atoms" % label,
markerfacecolor="r",
markersize=3,
)
)
uc_v1 = struct.lattice.matrix[0, :]
uc_v2 = struct.lattice.matrix[1, :]
sl_v1 = modified_struct.lattice.matrix[0, :]
sl_v2 = modified_struct.lattice.matrix[1, :]
sl_v = (sl_v1 + sl_v2) * n_uc
uc_v = (uc_v1 + uc_v2) * n_uc
rx = np.abs(int(n_uc * sl_v[0] / uc_v[0]))
ry = np.abs(int(n_uc * sl_v[1] / uc_v[1]))
for i, j in product(range(-rx, rx), range(-ry, ry)):
v1 = struct.lattice.matrix[0, :]
v2 = struct.lattice.matrix[1, :]
current_start = v1 * i + v2 * j
plt.plot(
[current_start[0], current_start[0] + v1[0]],
[current_start[1], current_start[1] + v1[1]],
"-b",
linewidth=0.3,
)
plt.plot(
[current_start[0], current_start[0] + v2[0]],
[current_start[1], current_start[1] + v2[1]],
"-b",
linewidth=0.3,
)
if show_atoms:
plt.plot(
np.add(struct.cart_coords[:, 0], current_start[0]),
np.add(struct.cart_coords[:, 1], current_start[1]),
"og",
markersize=0.1,
)
legend_elements.append(Line2D([0], [0], color="b", lw=1, label="%s Lattice" % label))
if show_atoms:
legend_elements.append(
Line2D(
[0],
[0],
marker="o",
color="w",
lw=1,
label="%s atoms" % label,
markerfacecolor="g",
markersize=3,
)
)
plt.axis("scaled")
plt.legend(handles=legend_elements)
plt.title("%s unit cell and superlattice" % label)
plt.show()
def merge_slabs(substrate, film, slab_offset, x_offset, y_offset, vacuum=20, **kwargs):
"""
Given substrate and film supercells (oriented to match as closely as possible),
strain the film to match the substrate lattice and combine the slabs.
Args:
        substrate (Slab): substrate supercell slab
        film (Slab): film supercell slab (strained in place to match the substrate)
        slab_offset: spacing between the substrate and film
        x_offset, y_offset: in-plane displacement of the film in Cartesian coordinates
        vacuum: vacuum buffer above the film
Returns:
combined_structure (Slab): A structure with the strained film and substrate
combined into one structure
"""
# strain film to match substrate
new_latt = film.lattice.matrix.copy()
new_latt[:2, :2] = substrate.lattice.matrix[:2, :2]
film.lattice = Lattice(new_latt)
combined_species = [*substrate.species, *film.species]
if kwargs.get("cell_height"):
height = kwargs.get("cell_height")
else:
added_height = vacuum + slab_offset + film.lattice.c
height = added_height + substrate.lattice.matrix[2, 2]
combined_lattice = substrate.lattice.matrix.copy()
combined_lattice[2, :] *= height / substrate.lattice.matrix[2, 2]
max_substrate = np.max(substrate.cart_coords[:, 2])
    min_film = np.min(film.cart_coords[:, 2])
    offset = max_substrate - min_film + slab_offset
offset_film_coords = [np.add(coord, [x_offset, y_offset, offset]) for coord in film.cart_coords]
combined_coords = [*substrate.cart_coords, *offset_film_coords]
combined_site_properties = {}
for key, item in substrate.site_properties.items():
combined_site_properties[key] = [
*substrate.site_properties[key],
*film.site_properties[key],
]
labels = ["substrate"] * len(substrate) + ["film"] * len(film)
combined_site_properties["interface_label"] = labels
combined_structure = Slab(
lattice=Lattice(combined_lattice),
species=combined_species,
coords=combined_coords,
miller_index=substrate.miller_index,
oriented_unit_cell=substrate,
shift=substrate.shift,
scale_factor=substrate.scale_factor,
coords_are_cartesian=True,
energy=substrate.energy,
reorient_lattice=False,
to_unit_cell=True,
site_properties=combined_site_properties,
)
return combined_structure
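# Hedged illustration (not part of the original module): the vertical placement in
# merge_slabs() above reduces to simple arithmetic on Cartesian z coordinates. The
# arrays below are made-up stand-ins for substrate/film cart_coords[:, 2].
def _example_merge_offset():
    import numpy as np
    sub_z = np.array([0.0, 1.9, 3.8])   # hypothetical substrate z coordinates
    film_z = np.array([0.0, 2.1])       # hypothetical film z coordinates
    slab_offset = 2.5                   # desired substrate-film spacing
    offset = sub_z.max() - film_z.min() + slab_offset
    shifted_film_z = film_z + offset
    # the lowest film atom now sits slab_offset above the highest substrate atom
    assert np.isclose(shifted_film_z.min() - sub_z.max(), slab_offset)
    return offset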
def strain_slabs(sub_slab, film_slab):
"""
Strain the film_slab to match the sub_slab,
orient the structures to match each other,
and return the new matching structures.
Args:
sub_slab (Slab): substrate supercell slab
film_slab (Slab): film supercell slab
Returns:
        sub_struct (Slab): substrate supercell with its in-plane lattice
            vectors averaged with the film's
        film_struct (Slab): film supercell rotated to align with the substrate
            and strained to the same averaged in-plane lattice.
"""
sub_struct = sub_slab.copy()
latt_1 = sub_struct.lattice.matrix.copy()
film_struct = align_x(film_slab, get_ortho_axes(sub_struct)).copy()
latt_2 = film_struct.lattice.matrix.copy()
# Rotate film so its diagonal matches with the sub's diagonal
diag_vec = np.add(latt_1[0, :], latt_1[1, :])
sub_norm_diag_vec = diag_vec / np.linalg.norm(diag_vec)
sub_b = np.cross(sub_norm_diag_vec, [0, 0, 1])
sub_matrix = np.vstack([sub_norm_diag_vec, sub_b, [0, 0, 1]])
diag_vec = np.add(latt_2[0, :], latt_2[1, :])
film_norm_diag_vec = diag_vec / np.linalg.norm(diag_vec)
film_b = np.cross(film_norm_diag_vec, [0, 0, 1])
film_matrix = np.vstack([film_norm_diag_vec, film_b, [0, 0, 1]])
rotation = np.dot(np.linalg.inv(film_matrix), sub_matrix)
new_latt = Lattice(np.dot(film_struct.lattice.matrix, rotation))
film_struct.lattice = new_latt
    # Average the two in-plane lattices so the strain is shared between film and substrate
mean_a = np.mean([film_struct.lattice.matrix[0, :], sub_struct.lattice.matrix[0, :]], axis=0)
mean_b = np.mean([film_struct.lattice.matrix[1, :], sub_struct.lattice.matrix[1, :]], axis=0)
new_latt = np.vstack([mean_a, mean_b, sub_struct.lattice.matrix[2, :]])
sub_struct.lattice = Lattice(new_latt)
new_latt = np.vstack([mean_a, mean_b, film_struct.lattice.matrix[2, :]])
film_struct.lattice = Lattice(new_latt)
return sub_struct, film_struct
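# Hedged illustration (not part of the original module): the lattice-averaging step
# in strain_slabs() replaces both in-plane lattice vectors with their element-wise
# mean, so film and substrate are strained towards each other by equal and opposite
# amounts. The vectors below are hypothetical.
def _example_average_lattices():
    import numpy as np
    film_a = np.array([3.10, 0.00, 0.00])
    sub_a = np.array([3.00, 0.00, 0.00])
    mean_a = np.mean([film_a, sub_a], axis=0)
    # both slabs end up on the same 3.05 vector: the film is compressed by
    # the same amount the substrate is stretched
    assert np.allclose(mean_a, [3.05, 0.0, 0.0])
    return mean_a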
def get_ortho_axes(structure):
"""
Get an orthonormal set of axes for the structure with the first axis
pointing along the a lattice vector.
Args:
structure (Structure)
Returns:
3x3 numpy matrix with the axes
"""
sub_a = structure.lattice.matrix[0, :] / np.linalg.norm(structure.lattice.matrix[0, :])
sub_c = third_vect(sub_a, structure.lattice.matrix[1, :])
sub_b = third_vect(sub_c, sub_a)
sub_b = sub_b / np.linalg.norm(sub_b)
return np.vstack((sub_a, sub_b, sub_c))
def align_x(slab, orthogonal_basis=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]):
"""
Align the a lattice vector of slab with the x axis. Optionally specify
an orthogonal_basis to align according to a different set of axes
Args:
slab (Slab): input structure
        orthogonal_basis (3x3 numpy matrix): If specified, align with
orthogonal_basis[0] rather than [1,0,0]
Returns:
        The input slab, rotated in place so that its a lattice vector points along the specified axis.
"""
sub_ortho_axes = get_ortho_axes(slab)
rotation = transf_mat(sub_ortho_axes, orthogonal_basis)
new_sub_lattice = Lattice(np.dot(slab.lattice.matrix[0:3], rotation))
slab.lattice = new_sub_lattice
return slab
def transf_mat(A, B):
"""
Get the matrix to transform from the set of axes A
to the set of axes B.
Args:
A (3x3 numpy array): original axis basis
B (3x3 numpy array): new axis basis
Returns:
3x3 numpy array transformation between the bases
"""
return np.dot(np.linalg.inv(A), B)
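# Hedged check (not part of the original module): for the transformation returned by
# transf_mat(), right-multiplying A maps its rows onto the rows of B, i.e.
# A @ transf_mat(A, B) == B, which is why align_x() applies it to the lattice matrix.
# The bases below are hypothetical.
def _example_transf_mat():
    import numpy as np
    A = np.array([[0.0, 1.0, 0.0],
                  [-1.0, 0.0, 0.0],
                  [0.0, 0.0, 1.0]])
    B = np.eye(3)
    assert np.allclose(np.dot(A, transf_mat(A, B)), B)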
def third_vect(a, b):
"""
Get a unit vector proportional to cross(a, b).
Args:
a, b (numpy arrays): 3D vectors.
Returns:
unit vector proportional to cross(a, b).
"""
c = np.cross(a, b)
return c / np.linalg.norm(c)
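# Hedged check (not part of the original module): third_vect() of the Cartesian x and
# y axes is the z axis, which is how get_ortho_axes() completes its right-handed
# orthonormal basis.
def _example_third_vect():
    import numpy as np
    assert np.allclose(third_vect(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])),
                       [0.0, 0.0, 1.0])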
def get_shear_reduced_slab(slab):
"""
Reduce the vectors of the slab plane according to the algorithm in
substrate_analyzer, then make a new Slab with a Lattice with those
reduced vectors.
Args:
slab (Slab): Slab to reduce
Returns:
Slab object of identical structure to the input slab
        but with reduced in-plane lattice vectors
"""
reduced_vectors = reduce_vectors(slab.lattice.matrix[0], slab.lattice.matrix[1])
new_lattice = Lattice([reduced_vectors[0], reduced_vectors[1], slab.lattice.matrix[2]])
return Slab(
lattice=new_lattice,
species=slab.species,
coords=slab.cart_coords,
miller_index=slab.miller_index,
oriented_unit_cell=slab.oriented_unit_cell,
shift=slab.shift,
scale_factor=slab.scale_factor,
coords_are_cartesian=True,
energy=slab.energy,
reorient_lattice=slab.reorient_lattice,
to_unit_cell=True,
)
|
mit
|
seanjtaylor/out-for-justice
|
scripts/make_risks.py
|
1
|
1148
|
import pickle
import numpy as np
import pandas as pd
def main(input_file):
with open(input_file) as f:
graph = pickle.load(f)
node_map = {int(node_id): i for i, node_id in enumerate(graph.nodes())}
outcomes = []
for fn, name in [
('data/sfnodesdtINTOXICATIONCRIME.csv', 'intoxication'),
('data/sfnodesdtPROPERTYCRIME.csv', 'property'),
('data/sfnodesdtVIOLENTCRIME.csv', 'violent'),
]:
df = pd.read_csv(fn)
df['crime_type'] = name
outcomes.append(df)
df = pd.concat(outcomes)
df['id'] = df['id'].apply(node_map.get)
df = df[df['id'].notnull()]
for (tod, dow), time_df in df.groupby(['daytime', 'superday']):
mat = time_df.set_index(['id', 'crime_type'])['preds'].unstack()
outfile = 'data/sf_crime_risks_{}_{}.npy'.format(
tod.lower().replace('-','_'),
dow.lower()
)
np.save(outfile, mat.values)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('input_file')
args = parser.parse_args()
main(args.input_file)
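# Hedged illustration (not part of the original script): the core reshape above is a
# long-to-wide pivot via set_index(...).unstack(), turning one row per
# (node, crime_type) prediction into a node x crime_type matrix. The toy frame below
# uses made-up values.
def _example_unstack():
    toy = pd.DataFrame({
        'id': [0, 0, 1, 1],
        'crime_type': ['property', 'violent', 'property', 'violent'],
        'preds': [0.1, 0.2, 0.3, 0.4],
    })
    mat = toy.set_index(['id', 'crime_type'])['preds'].unstack()
    # mat.values is a 2x2 array: rows are node ids, columns are crime types
    return mat.values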
|
mit
|
andrespires/python-buildpack
|
cf_spec/fixtures/miniconda_simple_app_python_2/app.py
|
12
|
2078
|
from flask import Flask
import pytest
import os
import importlib
import sys
MODULE_NAMES = ['numpy', 'scipy', 'sklearn', 'pandas']
modules = {}
for m in MODULE_NAMES:
try:
modules[m] = importlib.import_module(m)
except ImportError:
modules[m] = None
app = Flask(__name__)
@app.route('/<module_name>')
def in_module_tests(module_name):
if module_name not in modules:
return "This module is not listed"
try:
module = modules[module_name]
if module_name == 'sklearn':
result = pytest.main('--pyargs sklearn.tests')
result_string = "sklearn: passed" if result == 0 else "sklearn: failed"
else:
result = module.test()
            failures = result.failures
            result_string = "{}: number of failures={}".format(module_name, len(failures))
except (NameError, ImportError, AttributeError):
result_string = "{}: Error running test!".format(module_name)
return result_string
@app.route('/all')
def run_all():
results = "<br>\n".join([in_module_tests(m) for m in MODULE_NAMES])
return str(results)
def module_version(module_name):
m = modules[module_name]
if m is None:
version_string = "{}: unable to import".format(module_name)
else:
version_string = "{}: {}".format(module_name, m.__version__)
return version_string
@app.route('/')
def root():
versions = "<br>\n".join([module_version(m) for m in MODULE_NAMES])
python_version = "\npython-version%s\n" % sys.version
r = """<br><br>
Imports Successful!<br>
To test each module go to /numpy, /scipy, /sklearn and /pandas
or test all at /all.<br>
Test suites can take up to 10 minutes to run, main output is in app logs."""
return python_version + versions + r
if __name__ == '__main__':
# Get port from environment variable or choose 9099 as local default
port = int(os.getenv("PORT", 9099))
# Run the app, listening on all IPs with our chosen port number
app.run(host='0.0.0.0', port=port, debug=True)
|
mit
|
ycaihua/scikit-learn
|
examples/ensemble/plot_gradient_boosting_oob.py
|
230
|
4762
|
"""
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
|
bsd-3-clause
|
ewmoore/numpy
|
numpy/lib/recfunctions.py
|
13
|
34877
|
"""
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for matplotlib.
They have been rewritten and extended for convenience.
"""
import sys
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.lib._iotools import _is_string_like
_check_fill_value = np.ma.core._check_fill_value
__all__ = ['append_fields',
'drop_fields',
'find_duplicates',
'get_fieldstructure',
'join_by',
'merge_arrays',
'rec_append_fields', 'rec_drop_fields', 'rec_join',
'recursive_fill_fields', 'rename_fields',
'stack_arrays',
]
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.0), (2, 20.0), (0, 0.0)],
dtype=[('A', '<i4'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames) or None
def get_names_flat(adtype):
"""
    Returns the field names of the input datatype as a tuple. Nested structures
    are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
True
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names:
listnames.extend(get_names_flat(current))
return tuple(listnames) or None
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return ndtype.descr
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
names = current.names or ()
if len(names) > 1:
newdtype.append(('', current.descr))
else:
newdtype.extend(current.descr)
return np.dtype(newdtype).descr
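# Hedged illustration (not part of the original module): what zip_descr() returns for
# a pair of plain (unstructured) arrays, shown as a throwaway helper. The arrays are
# hypothetical; with flatten=True any nested fields would be collapsed first.
def _example_zip_descr():
    a = np.array([1, 2])
    b = np.array([10., 20.])
    # combines the two descriptions into [('f0', '<i8'), ('f1', '<f8')]
    # (exact type codes depend on the platform's default integer size)
    return zip_descr((a, b), flatten=True)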
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields as keys and a list of parent fields as values.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
# if (lastparent[-1] != lastname):
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents or None
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
for f in _izip_fields_flat(tuple(element)):
yield f
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if hasattr(element, '__iter__') and not isinstance(element, basestring):
for f in _izip_fields(element):
yield f
elif isinstance(element, np.void) and len(tuple(element)) == 1:
for f in _izip_fields(element):
yield f
else:
yield element
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
    seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
    flatten : {True, False}, optional
        Whether to collapse nested fields.
"""
# OK, that's a complete ripoff from Python2.6 itertools.izip_longest
def sentinel(counter=([fill_value] * (len(seqarrays) - 1)).pop):
"Yields the fill_value or raises IndexError"
yield counter()
#
fillers = itertools.repeat(fill_value)
iters = [itertools.chain(it, sentinel(), fillers) for it in seqarrays]
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
#
try:
for tup in itertools.izip(*iters):
yield tuple(zipfunc(tup))
except IndexError:
pass
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).iteritems():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def merge_arrays(seqarrays,
fill_value= -1, flatten=False, usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
mask = [(False, False) (False, False) (True, False)],
fill_value = (999999, 1e+20),
dtype = [('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
... usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i4'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('a', '<i4'), ('f1', '<f8')])
Notes
-----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type:
-1 for integers
-1.0 for floating point numbers
'-' for characters
'-1' for strings
True for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
if (not flatten) or \
(zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Make sure we have named fields
if not seqdtype.names:
seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = map(np.asanyarray, seqarrays)
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = zip_descr(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in itertools.izip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the fields
to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : {False, True}, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype (`asrecarray=False`)
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
>>> rfn.drop_fields(a, 'a')
array([((2.0, 3),), ((5.0, 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))],
dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)],
dtype=[('a', '<i4')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names, ]
else:
drop_names = set(drop_names)
#
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
#
newdtype = _drop_descr(base.dtype, drop_names)
if not newdtype:
return None
#
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names:
newdtype.append((newname,
_recursive_rename_fields(current, namemapper)))
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def append_fields(base, names, data, dtypes=None,
fill_value= -1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, basestring):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else :
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(max(len(base), len(data)),
dtype=base.dtype.descr + data.dtype.descr)
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
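# Hedged usage sketch (not part of the original module): appending one new field to a
# small structured array. The array and field name are made up; with usemask=False a
# plain ndarray with the extra field comes back.
def _example_append_fields():
    base = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
    extra = np.array([100, 200])
    # -> array([(1, 10.0, 100), (2, 20.0, 200)],
    #          dtype=[('A', ...), ('B', '<f8'), ('C', ...)])
    # (integer type codes depend on platform defaults)
    return append_fields(base, 'C', extra, usemask=False)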
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field.
Parameters
----------
    arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', float), ('C', float)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
('c', 30.0, 300.0)],
mask = [(False, False, True) (False, False, True) (False, False, False)
(False, False, False) (False, False, False)],
fill_value = ('N/A', 1e+20, 1e+20),
dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = dtype_l.descr
names = [_[0] for _ in newdescr]
for dtype_n in ndtype[1:]:
for descr in dtype_n.descr:
name = descr[0] or ''
if name not in names:
newdescr.append(descr)
names.append(name)
else:
nameidx = names.index(name)
current_descr = newdescr[nameidx]
if autoconvert:
if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
current_descr = list(current_descr)
current_descr[-1] = descr[1]
newdescr[nameidx] = tuple(current_descr)
elif descr[1] != current_descr[-1]:
raise TypeError("Incompatible type '%s' <> '%s'" % \
(dict(newdescr)[name], descr[1]))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
... # XXX: judging by the output, the ignoremask flag has no effect
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
    The key should be either a string or a sequence of strings corresponding
to the fields used to join the array.
An exception is raised if the `key` field cannot be found in the two input
arrays.
Neither `r1` nor `r2` should have any duplicates along `key`: the presence
of duplicates will make the output quite unreliable. Note that duplicates
are not looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of r1
        not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1 not
in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present in r2
        but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present in r1
        but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if `asrecarray==True`)
or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`) or
just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for the
two arrays and concatenating the result. This array is then sorted, and
the common entries selected. The output is constructed by filling the fields
with the selected entries. Matching is not preserved if there are some
duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError("The 'jointype' argument should be in 'inner', "\
"'outer' or 'leftouter' (got '%s' instead)" % jointype)
# If we have a single key, put it in a tuple
if isinstance(key, basestring):
key = (key,)
# Check the keys
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
(nb1, nb2) = (len(r1), len(r2))
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
if (set.intersection(set(r1names),set(r2names)).difference(key) and
not (r1postfix or r2postfix)):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
r1k = drop_fields(r1, [n for n in r1names if n not in key])
r2k = drop_fields(r2, [n for n in r2names if n not in key])
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = [list(_) for _ in r1k.dtype.descr]
# Add the other fields
ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
# Find the new list of names (it may be different from r1names)
names = list(_[0] for _ in ndtype)
for desc in r2.dtype.descr:
desc = list(desc)
name = desc[0]
# Have we seen the current name already ?
if name in names:
nameidx = ndtype.index(desc)
current = ndtype[nameidx]
# The current field is part of the key: take the largest dtype
if name in key:
current[-1] = max(desc[1], current[-1])
# The current field is not part of the key: add the suffixes
else:
current[0] += r1postfix
desc[0] += r2postfix
ndtype.insert(nameidx + 1, desc)
#... we haven't: just add the description to the current list
else:
names.extend(desc[0])
ndtype.append(desc)
# Revert the elements to tuples
ndtype = [tuple(_) for _ in ndtype]
    # Find the largest number of common fields: r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and not f in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
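# Hedged usage sketch (not part of the original module): an inner join of two small
# structured arrays on a shared 'key' field. The data values are made up.
def _example_join_by():
    r1 = np.array([(1, 10.), (2, 20.), (3, 30.)],
                  dtype=[('key', int), ('a', float)])
    r2 = np.array([(2, 200.), (3, 300.), (4, 400.)],
                  dtype=[('key', int), ('b', float)])
    # keeps only keys 2 and 3, with fields 'key', 'a' and 'b' in the output
    return join_by('key', r1, r2, jointype='inner', usemask=False)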
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
|
bsd-3-clause
|
anntzer/scipy
|
scipy/cluster/tests/test_hierarchy.py
|
12
|
42543
|
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
import pytest
from pytest import raises as assert_raises
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette, cut_tree, optimal_leaf_ordering,
_order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
from scipy.spatial.distance import pdist
from scipy.cluster._hierarchy import Heap
from . import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
import matplotlib # type: ignore[import]
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt # type: ignore[import]
have_matplotlib = True
except Exception:
have_matplotlib = False
class TestLinkage:
def test_linkage_non_finite_elements_in_distance_matrix(self):
# Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
# Exception expected.
y = np.zeros((6,))
y[0] = np.nan
assert_raises(ValueError, linkage, y)
def test_linkage_empty_distance_matrix(self):
# Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted']:
self.check_linkage_tdist(method)
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
self.check_linkage_q(method)
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
metric="euclidean")
Z = linkage(y, method)
assert_allclose(Z, expectedZ, atol=1e-06)
def test_compare_with_trivial(self):
rng = np.random.RandomState(0)
n = 20
X = rng.rand(n, 2)
d = pdist(X)
for method, code in _LINKAGE_METHODS.items():
Z_trivial = _hierarchy.linkage(d, n, code)
Z = linkage(d, method)
assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15)
def test_optimal_leaf_ordering(self):
Z = linkage(hierarchy_test_data.ytdist, optimal_ordering=True)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
assert_allclose(Z, expectedZ, atol=1e-10)
class TestLinkageTies:
_expectations = {
'single': np.array([[0, 1, 1.41421356, 2],
[2, 3, 1.41421356, 3]]),
'complete': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.82842712, 3]]),
'average': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'weighted': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'centroid': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'median': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.12132034, 3]]),
'ward': np.array([[0, 1, 1.41421356, 2],
[2, 3, 2.44948974, 3]]),
}
def test_linkage_ties(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']:
self.check_linkage_ties(method)
def check_linkage_ties(self, method):
X = np.array([[-1, -1], [0, 0], [1, 1]])
Z = linkage(X, method=method)
expectedZ = self._expectations[method]
assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent:
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
self.check_inconsistent_tdist(depth)
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance:
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion:
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster:
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
self.check_fclusterdata(t, 'inconsistent')
for t in hierarchy_test_data.fcluster_distance:
self.check_fclusterdata(t, 'distance')
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fclusterdata(t, 'maxclust')
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
self.check_fcluster(t, 'inconsistent')
for t in hierarchy_test_data.fcluster_distance:
self.check_fcluster(t, 'distance')
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fcluster(t, 'maxclust')
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
self.check_fcluster_monocrit(t)
for t in hierarchy_test_data.fcluster_maxclust:
self.check_fcluster_maxclust_monocrit(t)
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders:
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic:
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
self.help_is_isomorphic_randperm(1000, nc)
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
self.help_is_isomorphic_randperm(1000, nc, True, 5)
def test_is_isomorphic_7(self):
# Regression test for gh-6271
assert_(not is_isomorphic([1, 2, 3], [1, 1, 1]))
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in range(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage:
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
self.check_is_valid_linkage_various_size(nrow, ncol, valid)
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
        # Tests is_valid_linkage(Z) with linkage matrices of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent:
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
self.check_is_valid_im_various_size(nrow, ncol, valid)
def check_is_valid_im_various_size(self, nrow, ncol, valid):
        # Tests is_valid_im(R) with inconsistency matrices of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage:
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_equal(num_obs_linkage(Z), i)
class TestLeavesList:
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
self.check_leaves_list_Q(method)
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond:
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
assert_raises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in range(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
for i in range(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
        for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
                       list(zip(list(range(16, 21)), list(range(2, 7))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in range(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic:
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
assert_raises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
assert_equal(is_monotonic(Z), True)
class TestMaxDists:
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
self.check_maxdists_Q_linkage(method)
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts:
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
self.check_maxinconsts_Q_linkage(method)
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat:
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
self.check_maxRstat_invalid_index(i)
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
self.check_maxRstat_empty_linkage(i)
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
self.check_maxRstat_difrow_linkage(i)
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
self.check_maxRstat_one_cluster_linkage(i)
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
self.check_maxRstat_Q_linkage(method, i)
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram:
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
def test_labels_as_array_or_list(self):
# test for gh-12418
Z = linkage(hierarchy_test_data.ytdist, 'single')
labels = np.array([1, 3, 2, 6, 4, 5])
result1 = dendrogram(Z, labels=labels, no_plot=True)
result2 = dendrogram(Z, labels=labels.tolist(), no_plot=True)
assert result1 == result2
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_valid_label_size(self):
link = np.array([
[0, 1, 1.0, 4],
[2, 3, 1.0, 5],
[4, 5, 2.0, 6],
])
plt.figure()
with pytest.raises(ValueError) as exc_info:
dendrogram(link, labels=list(range(100)))
assert "Dimensions of Z and labels must be consistent."\
in str(exc_info.value)
with pytest.raises(
ValueError,
match="Dimensions of Z and labels must be consistent."):
dendrogram(link, labels=[])
plt.close()
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
self.check_dendrogram_plot(orientation)
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4],
'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
}
fig = plt.figure()
ax = fig.add_subplot(221)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
assert_equal(R1, expected)
# test that dendrogram accepts and handle the leaf_font_size and
# leaf_rotation keywords
dendrogram(Z, ax=ax, orientation=orientation,
leaf_font_size=20, leaf_rotation=90)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_rotation(), 90)
assert_equal(testlabel.get_size(), 20)
dendrogram(Z, ax=ax, orientation=orientation,
leaf_rotation=90)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_rotation(), 90)
dendrogram(Z, ax=ax, orientation=orientation,
leaf_font_size=20)
testlabel = (
ax.get_xticklabels()[0]
if orientation in ['top', 'bottom']
else ax.get_yticklabels()[0]
)
assert_equal(testlabel.get_size(), 20)
plt.close()
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['C0'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9],
'leaves_color_list': ['C0', 'C0'],
})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7],
'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
})
def test_dendrogram_colors(self):
# Tests dendrogram plots with alternate colors
Z = linkage(hierarchy_test_data.ytdist, 'single')
set_link_color_palette(['c', 'm', 'y', 'k'])
R = dendrogram(Z, no_plot=True,
above_threshold_color='g', color_threshold=250)
set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
color_list = R['color_list']
assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
# reset color palette (global list)
set_link_color_palette(None)
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in range(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in range(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
def within_tol(a, b, tol):
return np.abs(a - b).max() < tol
def test_unsupported_uncondensed_distance_matrix_linkage_warning():
assert_warns(ClusterWarning, linkage, [[0, 1], [1, 0]])
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
assert_raises(ValueError, linkage, [[1, 1], [1, 1]],
method=method, metric='cityblock')
def test_2x2_linkage():
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
def test_node_compare():
np.random.seed(23)
nobs = 50
X = np.random.randn(nobs, 4)
Z = scipy.cluster.hierarchy.ward(X)
tree = to_tree(Z)
assert_(tree > tree.get_left())
assert_(tree.get_right() > tree.get_left())
assert_(tree.get_right() == tree.get_right())
assert_(tree.get_right() != tree.get_left())
def test_cut_tree():
np.random.seed(23)
nobs = 50
X = np.random.randn(nobs, 4)
Z = scipy.cluster.hierarchy.ward(X)
cutree = cut_tree(Z)
assert_equal(cutree[:, 0], np.arange(nobs))
assert_equal(cutree[:, -1], np.zeros(nobs))
assert_equal(cutree.max(0), np.arange(nobs - 1, -1, -1))
assert_equal(cutree[:, [-5]], cut_tree(Z, n_clusters=5))
assert_equal(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]))
assert_equal(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]))
nodes = _order_cluster_tree(Z)
heights = np.array([node.dist for node in nodes])
assert_equal(cutree[:, np.searchsorted(heights, [5])],
cut_tree(Z, height=5))
assert_equal(cutree[:, np.searchsorted(heights, [5, 10])],
cut_tree(Z, height=[5, 10]))
assert_equal(cutree[:, np.searchsorted(heights, [10, 5])],
cut_tree(Z, height=[10, 5]))
def test_optimal_leaf_ordering():
# test with the distance vector y
Z = optimal_leaf_ordering(linkage(hierarchy_test_data.ytdist),
hierarchy_test_data.ytdist)
expectedZ = hierarchy_test_data.linkage_ytdist_single_olo
assert_allclose(Z, expectedZ, atol=1e-10)
# test with the observation matrix X
Z = optimal_leaf_ordering(linkage(hierarchy_test_data.X, 'ward'),
hierarchy_test_data.X)
expectedZ = hierarchy_test_data.linkage_X_ward_olo
assert_allclose(Z, expectedZ, atol=1e-06)
def test_Heap():
values = np.array([2, -1, 0, -1.5, 3])
heap = Heap(values)
pair = heap.get_min()
assert_equal(pair['key'], 3)
assert_equal(pair['value'], -1.5)
heap.remove_min()
pair = heap.get_min()
assert_equal(pair['key'], 1)
assert_equal(pair['value'], -1)
heap.change_value(1, 2.5)
pair = heap.get_min()
assert_equal(pair['key'], 2)
assert_equal(pair['value'], 0)
heap.remove_min()
heap.remove_min()
heap.change_value(1, 10)
pair = heap.get_min()
assert_equal(pair['key'], 4)
assert_equal(pair['value'], 3)
heap.remove_min()
pair = heap.get_min()
assert_equal(pair['key'], 1)
assert_equal(pair['value'], 10)
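# Hedged illustration, not part of the scipy test suite above: a minimal usage
# sketch of the hierarchy API these tests exercise (the random data, sizes, and
# names below are assumptions chosen only for the example).
def _example_hierarchy_usage():
    import numpy as np
    from scipy.spatial.distance import pdist
    from scipy.cluster.hierarchy import linkage, fcluster, is_valid_linkage, dendrogram
    X = np.random.rand(30, 4)                           # 30 observations, 4 features
    Z = linkage(pdist(X), method='single')              # (n-1) x 4 linkage matrix
    assert is_valid_linkage(Z)
    labels = fcluster(Z, t=3, criterion='maxclust')     # flat clustering with at most 3 clusters
    info = dendrogram(Z, no_plot=True)                  # leaf order and coordinates without plotting
    return labels, info['leaves']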
|
bsd-3-clause
|
rsivapr/scikit-learn
|
examples/svm/plot_svm_iris.py
|
5
|
1644
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-SVC (Support Vector Classification)
=========================================================
A support vector classifier (SVC) is applied below to the
`Iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_
dataset, using only its first two features.
The decision boundaries are shown together with all the points in the training set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
# we create an instance of an SVM classifier and fit the data.
clf = svm.SVC(C=1.0, kernel='linear')
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1, figsize=(4, 3))
pl.pcolormesh(xx, yy, Z, cmap=pl.cm.Paired)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.xlabel('Sepal length')
pl.ylabel('Sepal width')
pl.xlim(xx.min(), xx.max())
pl.ylim(yy.min(), yy.max())
pl.xticks(())
pl.yticks(())
pl.show()
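# Hedged alternative, not part of the original example: the same figure drawn
# with matplotlib.pyplot instead of the legacy pylab interface (assumes
# matplotlib is installed); call _plot_with_pyplot() to render it.
def _plot_with_pyplot():
    import matplotlib.pyplot as plt
    plt.figure(figsize=(4, 3))
    plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)            # decision regions
    plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)   # training points
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.show()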
|
bsd-3-clause
|
Work4Labs/lettuce
|
lettuce/django/steps/mail.py
|
20
|
1903
|
"""
Step definitions for working with Django email.
"""
from smtplib import SMTPException
from django.core import mail
from lettuce import step
STEP_PREFIX = r'(?:Given|And|Then|When) '
CHECK_PREFIX = r'(?:And|Then) '
EMAIL_PARTS = ('subject', 'body', 'from_email', 'to', 'bcc', 'cc')
GOOD_MAIL = mail.EmailMessage.send
@step(CHECK_PREFIX + r'I have sent (\d+) emails?')
def mail_sent_count(step, count):
"""
Then I have sent 2 emails
"""
count = int(count)
    assert len(mail.outbox) == count, \
        "Length of outbox is {0}, expected {1}".format(len(mail.outbox), count)
@step(r'I have not sent any emails')
def mail_not_sent(step):
"""
I have not sent any emails
"""
return mail_sent_count(step, 0)
@step(CHECK_PREFIX + (r'I have sent an email with "([^"]*)" in the ({0})'
'').format('|'.join(EMAIL_PARTS)))
def mail_sent_content(step, text, part):
"""
Then I have sent an email with "pandas" in the body
"""
assert any(text in getattr(email, part)
for email
in mail.outbox
), "An email contained expected text in the {0}".format(part)
@step(CHECK_PREFIX + r'I have sent an email with the following in the body:')
def mail_sent_content_multiline(step):
"""
I have sent an email with the following in the body:
\"""
Name: Mr. Panda
\"""
"""
return mail_sent_content(step, step.multiline, 'body')
@step(STEP_PREFIX + r'I clear my email outbox')
def mail_clear(step):
"""
I clear my email outbox
"""
mail.EmailMessage.send = GOOD_MAIL
mail.outbox = []
def broken_send(*args, **kwargs):
"""
Broken send function for email_broken step
"""
raise SMTPException("Failure mocked by lettuce")
@step(STEP_PREFIX + r'sending email does not work')
def email_broken(step):
"""
Break email sending
"""
mail.EmailMessage.send = broken_send
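# Hedged illustration, not part of the original module: a hypothetical feature
# snippet these step definitions would match (the scenario wording and the
# application-specific "I register a new account" step are assumptions).
#
#   Scenario: Welcome email is sent on signup
#     Given I clear my email outbox
#     When I register a new account
#     Then I have sent 1 email
#     And I have sent an email with "Welcome" in the subject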
|
gpl-3.0
|