repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
tridesclous/tridesclous | tridesclous/tests/test_decomposition.py | 1 | 2267 | import numpy as np
import time
import os
import shutil
from tridesclous.dataio import DataIO
from tridesclous.catalogueconstructor import CatalogueConstructor
from matplotlib import pyplot
from tridesclous.tests.testingtools import setup_catalogue
dataset_name='olfactory_bulb'
def setup_module():
setup_catalogue('test_decomposition', dataset_name=dataset_name)
def teardown_module():
if not(os.environ.get('APPVEYOR') in ('true', 'True')):
# this fixes the appveyor teardown_module bug
shutil.rmtree('test_decomposition')
def test_all_decomposition():
dirname = 'test_decomposition'
dataio = DataIO(dirname=dirname)
cc = catalogueconstructor = CatalogueConstructor(dataio=dataio)
print(dataio)
print(cc)
methods = ['global_pca', 'pca_by_channel', 'peak_max', ] #'neighborhood_pca', 'tsne', 'pca_by_channel_then_tsne'
for method in methods:
t0 = time.perf_counter()
cc.extract_some_features(method=method)
t1 = time.perf_counter()
print('extract_some_features', method, t1-t0)
#~ from tridesclous.gui import mkQApp, CatalogueWindow
#~ app = mkQApp()
#~ win = CatalogueWindow(catalogueconstructor)
#~ win.show()
#~ app.exec_()
def debug_one_decomposition():
dirname = 'test_catalogueconstructor'
dataio = DataIO(dirname=dirname)
cc = catalogueconstructor = CatalogueConstructor(dataio=dataio)
print(dataio)
print(cc)
t0 = time.perf_counter()
#~ cc.extract_some_features(method='global_pca', n_components=7)
#~ cc.extract_some_features(method='peak_max')
cc.extract_some_features(method='pca_by_channel', n_components_by_channel=3)
#~ cc.extract_some_features(method='neighborhood_pca', n_components_by_neighborhood=3, radius_um=500)
print(cc.channel_to_features)
print(cc.channel_to_features.shape)
t1 = time.perf_counter()
print('extract_some_features', t1-t0)
#~ from tridesclous.gui import mkQApp, CatalogueWindow
#~ app = mkQApp()
#~ win = CatalogueWindow(catalogueconstructor)
#~ win.show()
#~ app.exec_()
if __name__ == '__main__':
#~ setup_module()
#~ test_all_decomposition()
debug_one_decomposition()
| mit |
stylianos-kampakis/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs need cblas; fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
hlin117/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
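# l1_min_c returns the smallest C for which the L1-penalised logistic model can
# have at least one non-zero coefficient, so the grid above spans three decades
# upward from that threshold.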
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
UASLab/ImageAnalysis | video/4-extract-dji-frames.py | 1 | 13136 | #!/usr/bin/env python3
# extract srt form of subtitles from dji movie (caption setting needs
# to be turned on when movie is recorded)
#
# ffmpeg -txt_format text -i input_file.MOV output_file.srt
import argparse
import cv2
import datetime
import skvideo.io # pip3 install scikit-video
import math
import fractions
import json
from matplotlib import pyplot as plt
import numpy as np
import os
import pyexiv2
import re
import sys
from scipy import interpolate # straight-up linear interpolation, nothing fancy
from auracore import wgs84
from aurauas_flightdata import flight_loader, flight_interp
from props import PropertyNode
import props_json
import djilog
parser = argparse.ArgumentParser(description='extract and geotag dji movie frames.')
parser.add_argument('--video', required=True, help='input video')
parser.add_argument('--camera', help='select camera calibration file')
parser.add_argument('--cam-mount', choices=['forward', 'down', 'rear'],
default='down',
help='approximate camera mounting orientation')
parser.add_argument('--interval', type=float, default=1.0, help='extraction interval')
parser.add_argument('--distance', type=float, help='max extraction distance interval')
parser.add_argument('--start-time', type=float, help='begin frame grabbing at this time.')
parser.add_argument('--end-time', type=float, help='end frame grabbing at this time.')
parser.add_argument('--start-counter', type=int, default=1, help='first image counter')
parser.add_argument('--ground', type=float, help='ground altitude in meters')
parser.add_argument('--djicsv', help='name of dji exported csv log file from the flight, see https://www.phantomhelp.com/logviewer/upload/')
args = parser.parse_args()
r2d = 180.0 / math.pi
match_ratio = 0.75
scale = 0.4
filter_method = 'homography'
tol = 3.0
overlap = 0.25
djicsv = djilog.djicsv()
djicsv.load(args.djicsv)
class Fraction(fractions.Fraction):
"""Only create Fractions from floats.
>>> Fraction(0.3)
Fraction(3, 10)
>>> Fraction(1.1)
Fraction(11, 10)
"""
def __new__(cls, value, ignore=None):
"""Should be compatible with Python 2.6, though untested."""
return fractions.Fraction.from_float(value).limit_denominator(99999)
def dms_to_decimal(degrees, minutes, seconds, sign=' '):
"""Convert degrees, minutes, seconds into decimal degrees.
>>> dms_to_decimal(10, 10, 10)
10.169444444444444
>>> dms_to_decimal(8, 9, 10, 'S')
-8.152777777777779
"""
return (-1 if sign[0] in 'SWsw' else 1) * (
float(degrees) +
float(minutes) / 60 +
float(seconds) / 3600
)
def decimal_to_dms(decimal):
"""Convert decimal degrees into degrees, minutes, seconds.
>>> decimal_to_dms(50.445891)
[Fraction(50, 1), Fraction(26, 1), Fraction(113019, 2500)]
>>> decimal_to_dms(-125.976893)
[Fraction(125, 1), Fraction(58, 1), Fraction(92037, 2500)]
"""
remainder, degrees = math.modf(abs(decimal))
remainder, minutes = math.modf(remainder * 60)
return [Fraction(n) for n in (degrees, minutes, remainder * 60)]
# find affine transform between matching keypoints in pixel
# coordinate space. fullAffine=True means unconstrained to
# include best warp/shear. fullAffine=False means limit the
# matrix to only best rotation, translation, and scale.
def findAffine(src, dst, fullAffine=False):
affine_minpts = 7
#print("src:", src)
#print("dst:", dst)
if len(src) >= affine_minpts:
# affine = cv2.estimateRigidTransform(np.array([src]), np.array([dst]), fullAffine)
affine, status = \
cv2.estimateAffinePartial2D(np.array([src]).astype(np.float32),
np.array([dst]).astype(np.float32))
else:
affine = None
#print str(affine)
return affine
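# decomposeAffine() below splits a 2x3 affine matrix [[a, b, tx], [c, d, ty]]
# into (rotation_deg, tx, ty, sx, sy): sx and sy are the row norms
# sqrt(a^2 + b^2) and sqrt(c^2 + d^2) (sign-flipped if a or d is negative), and
# the rotation is atan2(-b, a) wrapped into (-180, 180] degrees.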
def decomposeAffine(affine):
if affine is None:
return (0.0, 0.0, 0.0, 1.0, 1.0)
tx = affine[0][2]
ty = affine[1][2]
a = affine[0][0]
b = affine[0][1]
c = affine[1][0]
d = affine[1][1]
sx = math.sqrt( a*a + b*b )
if a < 0.0:
sx = -sx
sy = math.sqrt( c*c + d*d )
if d < 0.0:
sy = -sy
rotate_deg = math.atan2(-b,a) * 180.0/math.pi
if rotate_deg < -180.0:
rotate_deg += 360.0
if rotate_deg > 180.0:
rotate_deg -= 360.0
return (rotate_deg, tx, ty, sx, sy)
def filterMatches(kp1, kp2, matches):
mkp1, mkp2 = [], []
idx_pairs = []
used = np.zeros(len(kp2), np.bool_)
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * match_ratio:
#print " dist[0] = %d dist[1] = %d" % (m[0].distance, m[1].distance)
m = m[0]
# FIXME: ignore the bottom section of movie for feature detection
#if kp1[m.queryIdx].pt[1] > h*0.75:
# continue
if not used[m.trainIdx]:
used[m.trainIdx] = True
mkp1.append( kp1[m.queryIdx] )
mkp2.append( kp2[m.trainIdx] )
idx_pairs.append( (m.queryIdx, m.trainIdx) )
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
kp_pairs = zip(mkp1, mkp2)
return p1, p2, kp_pairs, idx_pairs, mkp1
def filterFeatures(p1, p2, K, method):
inliers = 0
total = len(p1)
space = ""
status = []
M = None
if len(p1) < 7:
# not enough points
return None, np.zeros(total), [], []
if method == 'homography':
M, status = cv2.findHomography(p1, p2, cv2.LMEDS, tol)
elif method == 'fundamental':
M, status = cv2.findFundamentalMat(p1, p2, cv2.LMEDS, tol)
elif method == 'essential':
M, status = cv2.findEssentialMat(p1, p2, K, cv2.LMEDS, threshold=tol)
elif method == 'none':
M = None
status = np.ones(total)
newp1 = []
newp2 = []
for i, flag in enumerate(status):
if flag:
newp1.append(p1[i])
newp2.append(p2[i])
p1 = np.float32(newp1)
p2 = np.float32(newp2)
inliers = np.sum(status)
total = len(status)
#print '%s%d / %d inliers/matched' % (space, np.sum(status), len(status))
return M, status, np.float32(newp1), np.float32(newp2)
# pathname work
abspath = os.path.abspath(args.video)
basename, ext = os.path.splitext(abspath)
srtname = basename + ".srt"
dirname = basename + "_frames"
print("basename:", basename)
print("srtname:", srtname)
print("dirname:", dirname)
local_config = os.path.join(dirname, "camera.json")
config = PropertyNode()
if args.camera:
# seed the camera calibration and distortion coefficients from a
# known camera config
print('Setting camera config from:', args.camera)
props_json.load(args.camera, config)
config.setString('name', args.camera)
props_json.save(local_config, config)
elif os.path.exists(local_config):
# load local config file if it exists
props_json.load(local_config, config)
K_list = []
for i in range(9):
K_list.append( config.getFloatEnum('K', i) )
K = np.copy(np.array(K_list)).reshape(3,3)
dist = []
for i in range(5):
dist.append( config.getFloatEnum("dist_coeffs", i) )
# check for required input files
if not os.path.isfile(args.video):
print("%s doesn't exist, aborting ..." % args.video)
quit()
if os.path.isfile(basename + ".srt"):
srtname = basename + ".srt"
elif os.path.isfile(basename + ".SRT"):
srtname = basename + ".SRT"
else:
print("SRT (caption) file doesn't exist, aborting ...")
quit()
# output directory
os.makedirs(dirname, exist_ok=True)
# setup feature detection
detector = cv2.SIFT_create(nfeatures=1000)
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
FLANN_INDEX_LSH = 6
flann_params = { 'algorithm': FLANN_INDEX_KDTREE,
'trees': 5 }
matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)
srt = djilog.djisrt()
srt.load(srtname)
# fetch video metadata
metadata = skvideo.io.ffprobe(args.video)
#print(metadata.keys())
#print(json.dumps(metadata["video"], indent=4))
fps_string = metadata['video']['@avg_frame_rate']
(num, den) = fps_string.split('/')
fps = float(num) / float(den)
codec = metadata['video']['@codec_long_name']
w = int(metadata['video']['@width'])
h = int(metadata['video']['@height'])
print('fps:', fps)
print('codec:', codec)
print('output size:', w, 'x', h)
# extract frames
print("Opening ", args.video)
reader = skvideo.io.FFmpegReader(args.video, inputdict={}, outputdict={})
meta = os.path.join(dirname, "image-metadata.txt")
f = open(meta, 'w')
print("writing meta data to", meta)
last_time = -1000000
counter = 0
img_counter = args.start_counter
last_lat = 0
last_lon = 0
kp_list_ref = []
des_list_ref = []
for frame in reader.nextFrame():
frame = frame[:,:,::-1] # convert from RGB to BGR (to make opencv happy)
time = float(counter) / fps
counter += 1
print("frame:", counter, "time:", "%.3f" % time)
if args.start_time and time < args.start_time:
continue
if args.end_time and time > args.end_time:
break
if srt.need_interpolate:
lat_deg = srt.interp_lats(time)
lon_deg = srt.interp_lons(time)
alt_m = srt.interp_heights(time) + args.ground
else:
if counter - 1 >= len(srt.times):
print("MORE FRAMES THAN SRT ENTRIS")
continue
time_str = srt.times[counter - 1]
lat_deg = srt.lats[counter - 1]
lon_deg = srt.lons[counter - 1]
alt_m = srt.heights[counter - 1]
# compute unix version of timestamp (here in local tz)
main_str, t1, t2 = time_str.split(",")
fraction = (float(t1)*1000 + float(t2)) / 1000000
print("dt:", time_str)
date_time_obj = datetime.datetime.strptime(main_str, '%Y-%m-%d %H:%M:%S')
unix_sec = float(date_time_obj.strftime('%s')) + fraction
print("from local:", unix_sec)
record = djicsv.query(unix_sec)
roll = record['roll']
pitch = record['pitch']
yaw = record['yaw']
if yaw < 0: yaw += 360.0
if abs(lat_deg) < 0.001 and abs(lon_deg) < 0.001:
continue
write_frame = False
# by distance camera has moved
(c1, c2, dist_m) = wgs84.geo_inverse(lat_deg, lon_deg, last_lat, last_lon)
print("dist:", dist_m)
#if time >= last_time + args.interval and dist_m >= args.distance:
if args.distance and dist_m >= args.distance:
write_frame = True
# by visual overlap
method = cv2.INTER_AREA
frame_scale = cv2.resize(frame, (0,0), fx=scale, fy=scale,
interpolation=method)
cv2.imshow('frame', frame_scale)
gray = cv2.cvtColor(frame_scale, cv2.COLOR_BGR2GRAY)
(h, w) = gray.shape
kp_list = detector.detect(gray)
kp_list, des_list = detector.compute(gray, kp_list)
if not (des_list_ref is None) and not (des_list is None) and len(des_list_ref) and len(des_list):
matches = matcher.knnMatch(des_list, trainDescriptors=des_list_ref, k=2)
p1, p2, kp_pairs, idx_pairs, mkp1 = filterMatches(kp_list, kp_list_ref, matches)
M, status, newp1, newp2 = filterFeatures(p1, p2, K, filter_method)
filtered = []
for i, flag in enumerate(status):
if flag:
filtered.append(mkp1[i])
affine = findAffine(p2, p1, fullAffine=False)
if affine is None:
write_frame = True
else:
(rot, tx, ty, sx, sy) = decomposeAffine(affine)
xperc = abs(tx) / w
yperc = abs(ty) / h
perc = math.sqrt(xperc*xperc + yperc*yperc)
print("pixel dist:", tx, ty, "%.1f%% %.1f%%" % (xperc*100, yperc*100))
if perc >= overlap:
write_frame = True
else:
# first frame
write_frame = True
cv2.waitKey(1)
if write_frame:
print("WRITE FRAME")
file = os.path.join(dirname, "img_%04d" % img_counter + ".jpg")
img_counter += 1
cv2.imwrite(file, frame)
# geotag the image
exif = pyexiv2.ImageMetadata(file)
exif.read()
print(lat_deg, lon_deg, alt_m)
exif['Exif.Image.DateTime'] = time_str
GPS = 'Exif.GPSInfo.GPS'
exif[GPS + 'AltitudeRef'] = '0' if alt_m >= 0 else '1'
exif[GPS + 'Altitude'] = Fraction(alt_m)
exif[GPS + 'Latitude'] = decimal_to_dms(lat_deg)
exif[GPS + 'LatitudeRef'] = 'N' if lat_deg >= 0 else 'S'
exif[GPS + 'Longitude'] = decimal_to_dms(lon_deg)
exif[GPS + 'LongitudeRef'] = 'E' if lon_deg >= 0 else 'W'
exif[GPS + 'MapDatum'] = 'WGS-84'
exif.write()
head, tail = os.path.split(file)
f.write("%s,%.8f,%.8f,%.4f,%.4f,%.4f,%.4f,%.2f\n" % (tail, lat_deg, lon_deg, alt_m, yaw, pitch, roll, time))
# by distance
last_lat = lat_deg
last_lon = lon_deg
# by time
last_time = time
# by overlap
kp_list_ref = kp_list
des_list_ref = des_list
f.close()
| mit |
schets/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
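# (broadcasting the row [1..10] against the column [0..9] gives
# X[i, j] = 1 / (i + j + 1), which is exactly the Hilbert matrix entry)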
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
saiwing-yeung/scikit-learn | sklearn/datasets/svmlight_format.py | 19 | 16759 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
Parsing a text-based source can be expensive. When working repeatedly
on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
X_is_sp = int(hasattr(X, "tocsr"))
y_is_sp = int(hasattr(y, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if X_is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
if y_is_sp:
nz_labels = y[i].nonzero()[1]
else:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
if y_is_sp:
labels_str = label_pattern % y.data[i]
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples, 1),"
" got %r" % (yval.shape,))
else:
if yval.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], yval.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if yval is y and hasattr(yval, "sorted_indices"):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, "sort_indices"):
y.sort_indices()
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| bsd-3-clause |
ColdMatter/EDMSuite | MoleculeMOTScripts/dcamapi.py | 1 | 17354 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 24 13:42:45 2019
@author: TweezerLab
"""
import numpy as np
import ctypes as cp
import time
DCAMCAP_EVENT_FRAMEREADY = int("0x0002", 0)
DCAMERR_ERROR = 0
DCAMERR_NOERROR = 1
DCAMWAIT_TIMEOUT_INFINITE = int("0x80000000", 0)
DCAM_CAPTUREMODE_SNAP = 0
DCAM_CAPTUREMODE_SEQUENCE = 1
DCAM_DEFAULT_ARG = 0
DCAM_IDPROP_EXPOSURETIME = int("0x001F0110", 0)
DCAM_IDSTR_MODEL = int("0x04000104", 0)
CAPTUREMODE_SNAP = 0
CAPTUREMODE_SEQUENCE = 1
DCAM_IDMSG_GETPARAM=int("0x0202",0)
DCAM_IDMSG_SETPARAM=int("0x0201",0)
DCAM_IDPARAM_SUBARRAY_INQ=int("0x800001A2",0)
DCAM_IDPARAM_SUBARRAY=int("0xC00001E2",0)
class DCAM_PARAM_PROPERTYATTR(cp.Structure):
_fields_ = [("cbSize", cp.c_int32),
("iProp", cp.c_int32),
("option", cp.c_int32),
("iReserved1", cp.c_int32),
("attribute", cp.c_int32),
("iGroup", cp.c_int32),
("iUnit", cp.c_int32),
("attribute2", cp.c_int32),
("valuemin", cp.c_double),
("valuemax", cp.c_double),
("valuestep", cp.c_double),
("valuedefault", cp.c_double),
("nMaxChannel", cp.c_int32),
("iReserved3", cp.c_int32),
("nMaxView", cp.c_int32),
("iProp_NumberOfElement", cp.c_int32),
("iProp_ArrayBase", cp.c_int32),
("iPropStep_Element", cp.c_int32)]
class DCAM_PARAM_PROPERTYVALUETEXT(cp.Structure):
_fields_ = [("cbSize", cp.c_int32),
("iProp", cp.c_int32),
("value", cp.c_double),
("text", cp.c_char_p),
("textbytes", cp.c_int32)]
class DCAM_HDR_PARAM(cp.Structure):
_fields_ = [("cbSize", cp.c_ulong),
("id", cp.c_ulong),
("iFlag", cp.c_ulong),
("oFlag", cp.c_ulong)]
class DCAM_PARAM_SUBARRAY_INQ(cp.Structure):
_fields_ = [("hdr", DCAM_HDR_PARAM),
("binning", cp.c_int32),
("hmax", cp.c_int32),
("vmax", cp.c_int32),
("hposunit", cp.c_int32),
("vposunit", cp.c_int32),
("hunit", cp.c_int32),
("vunit", cp.c_int32)]
class DCAM_PARAM_SUBARRAY(cp.Structure):
_fields_ = [("hdr", DCAM_HDR_PARAM),
("hpos", cp.c_int32),
("vpos", cp.c_int32),
("hsize", cp.c_int32),
("vsize", cp.c_int32)]
class DCAMAPI():
def __init__(self):
self.camera_id = 0
self.camera_handle = cp.c_void_p(0)
self.dcam=cp.windll.dcamapi
def errorHandler(self,err_code,state):
string_buffer=cp.create_string_buffer(100)
if (err_code == DCAMERR_ERROR):
last_error=self.dcam.dcam_getlasterror(self.camera_handle,
string_buffer,
cp.c_int32(100))
raise Exception('Error in '+\
str(state)+\
' with last error '+\
str(last_error))
return err_code
def dcam_init(self):
n_camera=cp.c_int32(0)
self.errorHandler(
self.dcam.dcam_init(None,
cp.byref(n_camera),
None),"dcam_init")
self.n_camera=n_camera
def dcam_open(self):
self.errorHandler(
self.dcam.dcam_open(cp.byref(self.camera_handle),
cp.c_int32(self.camera_id),
None),
"dcam_open")
def dcam_close(self):
self.errorHandler(
self.dcam.dcam_close(self.camera_handle),
"dcam_close")
def dcam_getmodelinfo(self):
string_buffer = cp.create_string_buffer(100)
self.errorHandler(
self.dcam.dcam_getmodelinfo(cp.c_int32(self.camera_id),
cp.c_int32(DCAM_IDSTR_MODEL),
string_buffer,
cp.c_int(100)),
"dcam_getmodelinfo")
return string_buffer.value
def set_capture_mode(self,capture_mode):
self.capture_mode=capture_mode
def getcameraproperties(self):
pass
def dcam_getpropertyattr(self):
pass
def dcam_getpropertyvalue(self):
pass
def dcam_getpropertyvaluetext(self):
pass
def dcam_setgetpropertyvalue(self):
pass
def dcam_getbinning(self):
binning=cp.c_int32(0)
self.errorHandler(
self.dcam.dcam_getbinning(self.camera_handle,
cp.byref(binning)),
"dcam_getbinning")
return binning.value
def dcam_getexposuretime(self):
exptime=cp.c_double(0)
self.errorHandler(
self.dcam.dcam_getexposuretime(self.camera_handle,
cp.byref(exptime)),
"dcam_getexposuretime")
return exptime.value
def dcam_gettriggermode(self):
trigmode=cp.c_int32(0)
self.errorHandler(
self.dcam.dcam_gettriggermode(self.camera_handle,
cp.byref(trigmode)),
"dcam_gettriggermode")
return trigmode.value
def dcam_gettriggerpolarity(self):
trigpol=cp.c_int32(0)
self.errorHandler(
self.dcam.dcam_gettriggerpolarity(self.camera_handle,
cp.byref(trigpol)),
"dcam_gettriggerpolarity")
return trigpol.value
def dcam_getdataframebytes(self):
frame_bytes = cp.c_int32(0)
self.errorHandler(
self.dcam.dcam_getdataframebytes(self.camera_handle,
cp.byref(frame_bytes)),
"dcam_getframedatabytes")
return frame_bytes.value
def dcam_setbinning(self,binning):
self.errorHandler(
self.dcam.dcam_setbinning(self.camera_handle,
cp.c_int32(binning)),
"dcam_setbinning")
def dcam_setexposuretime(self,exptime): # TODO: set low exposure time for speed up
self.errorHandler(
self.dcam.dcam_setexposuretime(self.camera_handle,
cp.c_double(exptime)),
"dcam_setexposuretime")
def dcam_settriggermode(self,trigmode):
self.errorHandler(
self.dcam.dcam_settriggermode(self.camera_handle,
cp.c_int32(trigmode)),
"dcam_settriggermode")
def dcam_settriggerpolarity(self,trigpol):
self.errorHandler(
self.dcam.dcam_settriggerpolarity(self.camera_handle,
cp.c_int32(trigpol)),
"dcam_settriggerpolarity")
def dcam_extended_subarray_inq(self,binning):
sub_array_inq=DCAM_PARAM_SUBARRAY_INQ(DCAM_HDR_PARAM())
sub_array_inq.hdr.cbSize=cp.sizeof(sub_array_inq)
sub_array_inq.hdr.id=DCAM_IDPARAM_SUBARRAY_INQ
sub_array_inq.binning=binning
self.errorHandler(
self.dcam.dcam_extended(self.camera_handle,
DCAM_IDMSG_GETPARAM,
cp.byref(sub_array_inq),
cp.sizeof(DCAM_PARAM_SUBARRAY_INQ)),
"dcam_extended_subarray_inq")
return sub_array_inq
def dcam_extended_subarray_getROI(self):
sub_array=DCAM_PARAM_SUBARRAY(DCAM_HDR_PARAM())
sub_array.hdr.cbSize=cp.sizeof(sub_array)
sub_array.hdr.id=DCAM_IDPARAM_SUBARRAY
self.errorHandler(
self.dcam.dcam_extended(self.camera_handle,
DCAM_IDMSG_GETPARAM,
cp.byref(sub_array),
cp.sizeof(DCAM_PARAM_SUBARRAY)),
"dcam_extended_subarray_getROI")
return sub_array
def dcam_extended_subarray_setROI(self,x,y,len_x,len_y,binning):
sub_array_inq=self.dcam_extended_subarray_inq(binning)
sub_array=DCAM_PARAM_SUBARRAY(DCAM_HDR_PARAM())
sub_array.hdr.cbSize=cp.sizeof(sub_array)
sub_array.hdr.id=DCAM_IDPARAM_SUBARRAY
sub_array.hpos=x-(x%sub_array_inq.hposunit)
sub_array.vpos=y-(y%sub_array_inq.vposunit)
sub_array.hsize=len_x-(len_x%sub_array_inq.hunit)
sub_array.vsize=len_y-(len_y%sub_array_inq.hunit)
self.errorHandler(
self.dcam.dcam_extended(self.camera_handle,
DCAM_IDMSG_SETPARAM,
cp.byref(sub_array),
cp.sizeof(DCAM_PARAM_SUBARRAY)),
"dcam_extended_subarray_setROI")
def dcam_precapture(self):
self.errorHandler(
self.dcam.dcam_precapture(self.camera_handle,
cp.c_int(self.capture_mode)),
"dcam_precapture")
def dcam_allocframe(self):
self.errorHandler(
self.dcam.dcam_allocframe(self.camera_handle,
cp.c_int32(200)), # TODO: try less for speed up
"dcam_allocframe")
def dcam_capture(self):
self.errorHandler(
self.dcam.dcam_capture(self.camera_handle),
"dcam_capture")
def dcam_wait(self):
wait=cp.c_int(DCAMCAP_EVENT_FRAMEREADY)
self.errorHandler(
self.dcam.dcam_wait(self.camera_handle,
cp.byref(wait),
cp.c_int(DCAMWAIT_TIMEOUT_INFINITE),
None),
"dcam_wait")
def dcam_gettransferinfo(self):
buffer_indx = cp.c_int32(0)
frame_count = cp.c_int32(0)
self.errorHandler(
self.dcam.dcam_gettransferinfo(self.camera_handle,
cp.byref(buffer_indx),
cp.byref(frame_count)),
"dcam_gettransferinfo")
return buffer_indx.value, frame_count.value
def dcam_lockdata(self,frame):
buffer_pointer = cp.c_void_p(0)
row_bytes = cp.c_int32(0)
self.errorHandler(
self.dcam.dcam_lockdata(self.camera_handle,
cp.byref(buffer_pointer),
cp.byref(row_bytes),
cp.c_int32(frame)),
"dcam_lockdata")
return buffer_pointer
def dcam_unlockdata(self):
self.errorHandler(
self.dcam.dcam_unlockdata(self.camera_handle),
"dcam_unlockdata")
def dcam_idle(self):
self.errorHandler(
self.dcam.dcam_idle(self.camera_handle),
"dcam_idle")
def dcam_freeframe(self):
self.errorHandler(
self.dcam.dcam_freeframe(self.camera_handle),
"dcam_freeframe")
def getdata(self,frame):
data_frame_bytes=self.dcam_getdataframebytes()
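# the buffer is interpreted as 16-bit pixels, so it holds data_frame_bytes/2
# uint16 elements (assumption implied by the dtype used below)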
array = np.empty((int(data_frame_bytes/2), 1), dtype=np.uint16)
buffer_pointer=self.dcam_lockdata(frame)
cp.memmove(array.ctypes.data, buffer_pointer, data_frame_bytes)
self.dcam_unlockdata()
return array
def run(self):
buffer_index = -1
self.capture_mode=0
n_buffers = int(2.0*self.dcam_getdataframebytes())
self.dcam_precapture()
self.dcam_allocframe()
self.dcam_capture()
self.dcam_wait()
buffer_indx, frame_count=self.dcam_gettransferinfo()
new_frames = []
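# collect the indices of frames acquired since the last call; if the driver's
# ring-buffer index wrapped around, take the tail of the buffer first and then
# the frames from the start up to the current index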
if buffer_indx < buffer_index:
for i in range(buffer_index + 1, n_buffers):
new_frames.append(i)
for i in range(buffer_indx + 1):
new_frames.append(i)
else:
for i in range(buffer_index, buffer_indx):
new_frames.append(i+1)
buffer_index = buffer_indx
frames = []
for frame in new_frames:
array=self.getdata(frame)
frames.append(array)
return frames
def test_running_time_binning_4():
dcamapi=DCAMAPI()
dcamapi.dcam_init()
dcamapi.dcam_open()
dcamapi.dcam_setbinning(4)
cap_time=[]
for _ in range(100):
start=time.time()
dcamapi.run()
stop=time.time()
cap_time.append(stop-start)
dcamapi.dcam_idle()
dcamapi.dcam_freeframe()
dcamapi.dcam_close()
print('\nBinning 4x4 and full frame')
print('Excluding freeframe and close camera')
print('mean capture time (100 iter)',np.mean(cap_time))
print('std capure time (100 iter)',np.std(cap_time))
def test_running_time_binning_1():
dcamapi=DCAMAPI()
dcamapi.dcam_init()
dcamapi.dcam_open()
dcamapi.dcam_setbinning(1)
cap_time=[]
for _ in range(100):
start=time.time()
dcamapi.run()
stop=time.time()
cap_time.append(stop-start)
dcamapi.dcam_idle()
dcamapi.dcam_freeframe()
dcamapi.dcam_close()
print('\nBinning 1x1 and full frame')
print('Excluding freeframe and close camera')
print('mean capture time (100 iter)',np.mean(cap_time))
print('std capture time (100 iter)',np.std(cap_time))
def test_running_time_subarray_binning_4():
import matplotlib.pyplot as plt
dcamapi=DCAMAPI()
dcamapi.dcam_init()
dcamapi.dcam_open()
dcamapi.dcam_setbinning(4)
cap_time=np.zeros((16,16,10),dtype=float)
for b in range(10):
l=0
for i in range(0,124,8):
k=0
for j in range(0,124,8):
dcamapi.dcam_extended_subarray_setROI(i,j,4,4,4)
start=time.time()
dcamapi.run()
stop=time.time()
cap_time[l,k,b]=1e3*(stop-start)
dcamapi.dcam_idle()
dcamapi.dcam_freeframe()
k+=1
l+=1
dcamapi.dcam_close()
print('\nBinning 4x4 and subarray 4x4')
print('Excluding freeframe and close camera')
print('mean capture time (10 iter)')
plt.figure()
plt.imshow(np.mean(cap_time,axis=2))
plt.colorbar()
plt.show()
print('std capture time (10 iter)')
plt.figure()
plt.imshow(np.std(cap_time,axis=2))
plt.colorbar()
plt.show()
def test_display_captured_image():
import matplotlib.pyplot as plt
start = time.time()
dcamapi=DCAMAPI()
dcamapi.dcam_init()
dcamapi.dcam_open()
dcamapi.dcam_setbinning(8)
cap_start = time.time()
frames=dcamapi.run()
cap_stop = time.time()
dcamapi.dcam_close()
stop = time.time()
print(f'time elapsed from start to stop : {stop-start}')
print(f'time elapsed during capture : {cap_stop-cap_start}')
image = frames[0].reshape(128,168)
plt.imshow(image)
plt.show()
def test_display_captured_image_subarray():
#import matplotlib.pyplot as plt
dcamapi=DCAMAPI()
dcamapi.dcam_init()
dcamapi.dcam_open()
dcamapi.dcam_setbinning(4)
dcamapi.dcam_extended_subarray_setROI(0,0,4,4,4)
dcamapi.run()
dcamapi.dcam_close()
#plt.imshow(frames[0].reshape(128,128))
def test_running_time_binning_1_ext():
import matplotlib.pyplot as plt
dcamapi=DCAMAPI()
print(dcamapi)
dcamapi.dcam_init()
dcamapi.dcam_open()
dcamapi.dcam_setbinning(1)
print(dcamapi.dcam_gettriggermode())
dcamapi.dcam_settriggermode(2)
print('waiting for trigger')
pic=dcamapi.run()
dcamapi.dcam_idle()
dcamapi.dcam_freeframe()
dcamapi.dcam_close()
return pic
if __name__=='__main__':
pic=test_display_captured_image()
#test_running_time_binning_4()
#test_running_time_subarray_binning_4()
#test_display_captured_image_subarray()
| mit |
nespinoza/exonailer | utilities/data_utils.py | 1 | 88290 | # -*- coding: utf-8 -*-
from math import floor,ceil
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import matplotlib.gridspec as gridspec
try:
import george
from george import kernels
except:
print 'Warning! The george package is not installed. Some GP functionalities will not work.'
try:
import celerite
from celerite import terms
except:
print 'Warning! The celerite package is not installed. Some GP functionalities will not work.'
import sys
import numpy as np
import batman
import radvel
log2pi = np.log(2.*np.pi)
G = 6.67408e-11 # Grav. constant in mks
# This defines prior distributions that need samples to be
# controlled so they don't get out of their support:
prior_distributions = ['Uniform','Jeffreys','Beta']
def get_sigma(x,median):
"""
This function returns the MAD-based standard-deviation.
"""
mad = np.median(np.abs(x-median))
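# 1.4826 ~ 1/Phi^-1(0.75): rescales the MAD so that, for Gaussian noise, the
# result is a consistent estimate of the standard deviation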
return 1.4826*mad
def get_phases(t,P,t0):
phase = ((t - np.median(t0))/np.median(P)) % 1
ii = np.where(phase>=0.5)[0]
phase[ii] = phase[ii]-1.0
return phase
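# Illustrative behaviour of get_phases() above (values chosen only as an
# example): for t = [2.0, 2.6], P = 1.0, t0 = 2.0 the returned phases are
# [0.0, -0.4], i.e. times are folded into the interval [-0.5, 0.5).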
def read_transit_params(prior_dict,instrument):
names = ['P','inc','a','p','t0','q1','q2']
vals = len(names)*[[]]
for i in range(len(names)):
try:
param = prior_dict[names[i]]
except:
param = prior_dict[names[i]+'_'+instrument]
vals[i] = param['object'].value
return vals
def pre_process(all_t,all_f,all_f_err,options,transit_instruments,parameters):
out_t = np.array([])
out_f = np.array([])
out_phases = np.array([])
out_f_err = np.array([])
out_transit_instruments = np.array([])
all_phases = np.zeros(len(all_t))
for instrument in options['photometry'].keys():
all_idx = np.where(transit_instruments==instrument)[0]
t = all_t[all_idx]
f = all_f[all_idx]
if all_f_err is not None:
f_err = all_f_err[all_idx]
# Now, the first phase in transit fitting is to 'detrend' the
# data. This is done with the 'detrend' flag. If
# the data is already detrended, set the flag to None:
if options['photometry'][instrument]['PHOT_DETREND'] is not None:
if options['photometry'][instrument]['PHOT_DETREND'] == 'mfilter':
# Get median filter, and smooth it with a gaussian filter:
from scipy.signal import medfilt
from scipy.ndimage.filters import gaussian_filter
filt = gaussian_filter(medfilt(f,options['photometry'][instrument]['WINDOW']),5)
f = f/filt
if f_err is not None:
f_err = f_err/filt
elif type(options['photometry'][instrument]['PHOT_DETREND']) is not bool:
print '\t WARNING: PHOT_DETREND option '+options['photometry'][instrument]['PHOT_DETREND']+\
' for '+instrument+' not recognized!'
# Extract transit parameters from prior dictionary:
if options['MODE'] != 'transit_noise':
P,inc,a,p,t0,q1,q2 = read_transit_params(parameters,instrument)
# If the user wants to ommit transit events:
if len(options['photometry'][instrument]['NOMIT'])>0:
# Get the phases:
phases = (t-t0)/P
# Get the transit events in phase space:
transit_events = np.arange(ceil(np.min(phases)),floor(np.max(phases))+1)
# Convert to zeros fluxes at the events you want to eliminate:
for n in options['photometry'][instrument]['NOMIT']:
idx = np.where((phases>n-0.5)&(phases<n+0.5))[0]
f[idx] = np.zeros(len(idx))
# Eliminate them from the t,f and phases array:
idx = np.where(f!=0.0)[0]
t = t[idx]
f = f[idx]
phases = phases[idx]
if f_err is not None:
f_err = f_err[idx]
if options['MODE'] != 'transit_noise':
# Generate the phases:
phases = get_phases(t,P,t0)
# If outlier removal is on, remove them:
if options['photometry'][instrument]['PHOT_GET_OUTLIERS'] and options['MODE'] != 'transit_noise':
model = get_transit_model(t.astype('float64'),t0,P,p,a,inc,q1,q2,options['photometry'][instrument]['LD_LAW'])
# Get approximate transit duration in phase space:
idx = np.where(model == 1.0)[0]
phase_dur = np.abs(phases[idx][np.where(np.abs(phases[idx]) == \
np.min(np.abs(phases[idx])))])[0] + 0.01
# Get precision:
median_flux = np.median(f)
sigma = get_sigma(f,median_flux)
# Perform sigma-clipping for out-of-transit data using phased data:
good_times = np.array([])
good_fluxes = np.array([])
good_phases = np.array([])
if f_err is not None:
good_errors = np.array([])
# Iterate through the dataset:
for i in range(len(t)):
if np.abs(phases[i])<phase_dur:
good_times = np.append(good_times,t[i])
good_fluxes = np.append(good_fluxes,f[i])
good_phases = np.append(good_phases,phases[i])
if f_err is not None:
good_errors = np.append(good_errors,f_err[i])
else:
if (f[i]<median_flux + 3*sigma) and (f[i]>median_flux - 3*sigma):
good_times = np.append(good_times,t[i])
good_fluxes = np.append(good_fluxes,f[i])
good_phases = np.append(good_phases,phases[i])
if f_err is not None:
good_errors = np.append(good_errors,f_err[i])
t = good_times
f = good_fluxes
phases = good_phases
if f_err is not None:
f_err = good_errors
out_t = np.append(out_t,t)
out_f = np.append(out_f,f)
out_transit_instruments = np.append(out_transit_instruments,np.array(len(t)*[instrument]))
out_f_err = np.append(out_f_err,f_err)
#all_t[all_idx] = t
#all_f[all_idx] = f
#all_f_err[all_idx] = f_err
if options['MODE'] != 'transit_noise':
out_phases = np.append(out_phases,phases)
#all_phases[all_idx] = phases
else:
out_phases = np.append(out_phases,np.zeros(len(t)))
#all_phases = np.zeros(len(t))
if f_err is not None:
return out_t.astype('float64'), out_phases.astype('float64'), out_f.astype('float64'), out_f_err.astype('float64'),out_transit_instruments
#return all_t.astype('float64'), all_phases.astype('float64'), all_f.astype('float64'), all_f_err.astype('float64')
else:
return out_t.astype('float64'), out_phases.astype('float64'), out_f.astype('float64'), f_err,out_transit_instruments
#return all_t.astype('float64'), all_phases.astype('float64'), all_f.astype('float64'), f_err
def init_batman(t,law):
"""
This function initializes the batman code.
"""
params = batman.TransitParams()
params.t0 = 0.
params.per = 1.
params.rp = 0.1
params.a = 15.
params.inc = 87.
params.ecc = 0.
params.w = 90.
if law == 'linear':
params.u = [0.5]
else:
params.u = [0.1,0.3]
params.limb_dark = law
m = batman.TransitModel(params,t)
return params,m
def init_radvel(nplanets=1):
return radvel.model.Parameters(nplanets,basis='per tc e w k')
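# the radvel basis string 'per tc e w k' selects the parametrisation: period,
# time of conjunction, eccentricity, argument of periastron and RV
# semi-amplitude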
def get_transit_model(t,t0,P,p,a,inc,q1,q2,ld_law):
params,m = init_batman(t,law=ld_law)
coeff1,coeff2 = reverse_ld_coeffs(ld_law, q1, q2)
params.t0 = t0
params.per = P
params.rp = p
params.a = a
params.inc = inc
if ld_law == 'linear':
params.u = [coeff1]
else:
params.u = [coeff1,coeff2]
return m.light_curve(params)
def convert_ld_coeffs(ld_law, coeff1, coeff2):
if ld_law == 'quadratic':
q1 = (coeff1 + coeff2)**2
q2 = coeff1/(2.*(coeff1+coeff2))
elif ld_law=='squareroot':
q1 = (coeff1 + coeff2)**2
q2 = coeff2/(2.*(coeff1+coeff2))
elif ld_law=='logarithmic':
q1 = (1-coeff2)**2
q2 = (1.-coeff1)/(1.-coeff2)
elif ld_law=='linear':
return coeff1,0.0
return q1,q2
def reverse_ld_coeffs(ld_law, q1, q2):
if ld_law == 'quadratic':
coeff1 = 2.*np.sqrt(q1)*q2
coeff2 = np.sqrt(q1)*(1.-2.*q2)
elif ld_law=='squareroot':
coeff1 = np.sqrt(q1)*(1.-2.*q2)
coeff2 = 2.*np.sqrt(q1)*q2
elif ld_law=='logarithmic':
coeff1 = 1.-np.sqrt(q1)*q2
coeff2 = 1.-np.sqrt(q1)
if ld_law =='linear':
coeff1 = q1
coeff2 = 0.0
return coeff1,coeff2
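# Quick sanity check for the two transformations above (illustrative numbers,
# quadratic law): convert_ld_coeffs('quadratic', 0.3, 0.2) gives
# (q1, q2) = (0.25, 0.3) and reverse_ld_coeffs('quadratic', 0.25, 0.3) recovers
# (0.3, 0.2), i.e. the functions are inverses of each other under the
# Kipping (2013) q1/q2 parametrization.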
def count_instruments(instrument_list):
all_instruments = []
for instrument in instrument_list:
if instrument not in all_instruments:
all_instruments.append(instrument)
all_idxs = len(all_instruments)*[[]]
all_ndata = len(all_instruments)*[[]]
for i in range(len(all_instruments)):
all_idxs[i] = np.where(all_instruments[i] == instrument_list)[0]
all_ndata[i] = len(all_idxs[i])
return all_instruments,all_idxs,np.array(all_ndata)
import emcee
import Wavelets
import scipy.optimize as op
def exonailer_mcmc_fit(times, relative_flux, error, tr_instruments, times_rv, rv, rv_err, rv_instruments,\
parameters, idx_resampling, options):
"""
This function performs an MCMC fitting procedure using a transit model
fitted to input data using the batman package (Kreidberg, 2015) assuming
the underlying noise process is either 'white' or '1/f-like' (see Carter &
Winn, 2010). It makes use of the emcee package (Foreman-Mackey et al., 2014)
to perform the MCMC, and the sampling scheme explained in Kipping (2013) to
sample coefficients from two-parameter limb-darkening laws; the logarithmic
law is sampled according to Espinoza & Jordán (2016).
The inputs are:
times: Times (in same units as the period and time of transit center).
relative_flux: Relative flux; it is assumed out-of-transit flux is 1.
error: If you have errors on the fluxes, put them here. Otherwise, set
this to None.
tr_instruments: Instruments of each time/flux pair.
times_rv: Times (in same units as the period and time of transit center)
of RV data.
rv: Radial velocity measurements.
rv_err: If you have errors on the RVs, put them here. Otherwise, set
this to None.
rv_instruments: Instruments of each time/RV pair.
parameters: Dictionary containing the information regarding the parameters (including priors).
idx_resampling: This defines the indexes over which you want to perform such resampling
(selective resampling). It is a dictionary over the instruments; idx_resampling[instrument]
has the indexes for the given instrument.
options: Dictionary containing the information inputted by the user.
The outputs are the chains of each of the parameters in the theta_0 array in the same
order as they were inputted. This includes the sampled parameters from all the walkers.
"""
# If mode is not RV:
if options['MODE'] != 'rvs':
params = {}
m = {}
t_resampling = {}
transit_flat = {}
# Count instruments:
all_tr_instruments,all_tr_instruments_idxs,n_data_trs = count_instruments(tr_instruments)
# Prepare data for batman:
xt = times.astype('float64')
yt = relative_flux.astype('float64')
yerrt = error.astype('float64')
if options['MODE'] != 'transit_noise':
for k in range(len(all_tr_instruments)):
instrument = all_tr_instruments[k]
params[instrument],m[instrument] = init_batman(xt[all_tr_instruments_idxs[k]],\
law=options['photometry'][instrument]['LD_LAW'])
# Initialize the parameters of the transit model,
# and prepare resampling data if resampling is True:
if options['photometry'][instrument]['RESAMPLING']:
t_resampling[instrument] = np.array([])
for i in range(len(idx_resampling[instrument])):
tij = np.zeros(options['photometry'][instrument]['NRESAMPLING'])
for j in range(1,options['photometry'][instrument]['NRESAMPLING']+1):
# Eq (35) in Kipping (2010)
tij[j-1] = xt[all_tr_instruments_idxs[k]][idx_resampling[instrument][i]] + ((j - \
((options['photometry'][instrument]['NRESAMPLING']+1)/2.))*(options['photometry'][instrument]['TEXP']/np.double(\
options['photometry'][instrument]['NRESAMPLING'])))
t_resampling[instrument] = np.append(t_resampling[instrument], np.copy(tij))
params[instrument],m[instrument] = init_batman(t_resampling[instrument],\
law=options['photometry'][instrument]['LD_LAW'])
transit_flat[instrument] = np.ones(len(xt[all_tr_instruments_idxs[k]]))
transit_flat[instrument][idx_resampling[instrument]] = np.zeros(len(idx_resampling[instrument]))
# Initialize the variable names:
if options['MODE'] != 'rvs':
if len(all_tr_instruments)>1:
transit_params = ['P','inc']
else:
the_instrument = options['photometry'].keys()[0]
transit_params = ['P','inc','t0','a','p','sigma_w','sigma_r','q1','q2']
common_params = ['ecc','omega']
else:
common_params = ['ecc','omega','P','t0']
# If mode is not transit, prepare the RV data too:
if 'transit' not in options['MODE']:
xrv = times_rv.astype('float64')
yrv = rv.astype('float64')
if rv_err is None:
yerrrv = 0.0
else:
yerrrv = rv_err.astype('float64')
all_rv_instruments,all_rv_instruments_idxs,n_data_rvs = count_instruments(rv_instruments)
rv_params = ['K']
#if len(all_rv_instruments)>1:
# for instrument in all_rv_instruments:
# rv_params.append('mu_'+instrument)
# rv_params.append('sigma_w_rv_'+instrument)
#else:
# rv_params.append('mu')
# rv_params.append('sigma_w_rv')
radvel_params = init_radvel()
# Create lists that will save parameters to check the limits on:
parameters_to_check = []
# Check common parameters:
if options['MODE'] != 'transit_noise':
if parameters['ecc']['type'] == 'FIXED':
common_params.pop(common_params.index('ecc'))
elif parameters['ecc']['type'] in prior_distributions:
parameters_to_check.append('ecc')
if parameters['omega']['type'] == 'FIXED':
common_params.pop(common_params.index('omega'))
elif parameters['omega']['type'] in prior_distributions:
parameters_to_check.append('omega')
# Eliminate from the parameter list parameters that are being fixed:
# First, generate a sufix dictionary, which will add the sufix _instrument to
# each instrument in the MCMC, in order to keep track of the parameters that
# are being held constant between instruments and those that vary with instrument:
sufix = {}
if options['MODE'] != 'rvs' and options['MODE'] != 'transit_noise':
if len(all_tr_instruments)>1:
# Check parameters that always will be constant amongst transits:
for par in ['P','inc']:
if parameters[par]['type'] == 'FIXED':
transit_params.pop(transit_params.index(par))
elif parameters[par]['type'] in prior_distributions:
parameters_to_check.append(par)
# Now check parameters that might change between instruments:
for i in range(len(all_tr_instruments)):
instrument = all_tr_instruments[i]
sufix[instrument] = {}
for par in ['t0','a','p','sigma_w','q1','q2']:
orig_par = par
sufix[instrument][orig_par] = ''
if par not in parameters.keys():
par = par+'_'+instrument
sufix[instrument][orig_par] = '_'+instrument
if par not in parameters.keys():
print 'Error: parameter '+orig_par+' not defined. Exiting...'
sys.exit()
if par not in transit_params:
transit_params.append(par)
if parameters[par]['type'] == 'FIXED':
transit_params.pop(transit_params.index(par))
elif parameters[par]['type'] in prior_distributions:
parameters_to_check.append(par)
if options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'flicker':
for noise_param in ['sigma_r']:
transit_params.append(noise_param+'_'+instrument)
if parameters[noise_param+'_'+instrument]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param+'_'+instrument))
elif parameters[noise_param+'_'+instrument]['type'] in prior_distributions:
parameters_to_check.append(noise_param+'_'+instrument)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPExpSquaredKernel':
for noise_param in ['lnh','lnlambda']:
transit_params.append(noise_param+'_'+instrument)
if parameters[noise_param+'_'+instrument]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param+'_'+instrument))
elif parameters[noise_param+'_'+instrument]['type'] in prior_distributions:
parameters_to_check.append(noise_param+'_'+instrument)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPGranulation':
for noise_param in ['lnomega','lnS']:
transit_params.append(noise_param+'_'+instrument)
if parameters[noise_param+'_'+instrument]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param+'_'+instrument))
elif parameters[noise_param+'_'+instrument]['type'] in prior_distributions:
parameters_to_check.append(noise_param+'_'+instrument)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPAsteroseismology':
for noise_param in ['lnomega','lnS','lnQ','lnA','epsilon','lnW','lnnu','lnDeltanu']:
transit_params.append(noise_param+'_'+instrument)
if parameters[noise_param+'_'+instrument]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param+'_'+instrument))
elif parameters[noise_param+'_'+instrument]['type'] in prior_distributions:
parameters_to_check.append(noise_param+'_'+instrument)
else:
for par in ['P','t0','a','p','inc','sigma_w','q1','q2']:
if parameters[par]['type'] == 'FIXED':
transit_params.pop(transit_params.index(par))
elif parameters[par]['type'] in prior_distributions:
parameters_to_check.append(par)
if options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'flicker':
if parameters['sigma_r']['type'] == 'FIXED':
transit_params.pop(transit_params.index('sigma_r'))
elif parameters['sigma_r']['type'] in prior_distributions:
parameters_to_check.append('sigma_r')
elif options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'GPExpSquaredKernel':
transit_params.pop(transit_params.index('sigma_r'))
for noise_param in ['lnh','lnlambda']:
transit_params.append(noise_param)
if parameters[noise_param]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param))
elif parameters[noise_param]['type'] in prior_distributions:
parameters_to_check.append(noise_param)
elif options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'GPGranulation':
transit_params.pop(transit_params.index('sigma_r'))
for noise_param in ['lnomega','lnS']:
transit_params.append(noise_param)
if parameters[noise_param]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param))
elif parameters[noise_param]['type'] in prior_distributions:
parameters_to_check.append(noise_param)
elif options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'GPAsteroseismology':
transit_params.pop(transit_params.index('sigma_r'))
for noise_param in ['lnomega','lnS','lnQ','lnA','epsilon','lnW','lnnu','lnDeltanu']:
transit_params.append(noise_param)
if parameters[noise_param]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param))
elif parameters[noise_param]['type'] in prior_distributions:
parameters_to_check.append(noise_param)
else:
transit_params.pop(transit_params.index('sigma_r'))
if options['MODE'] != 'transit' and options['MODE'] != 'transit_noise':
if parameters['K']['type'] == 'FIXED':
rv_params.pop(rv_params.index('K'))
elif parameters['K']['type'] in prior_distributions:
parameters_to_check.append('K')
if len(all_rv_instruments)>1:
sigma_w_rv = {}
for instrument in all_rv_instruments:
sufix[instrument] = {}
for par in ['mu','sigma_w_rv']:
orig_par = par
sufix[instrument][orig_par] = ''
if par not in parameters.keys():
par = par+'_'+instrument
sufix[instrument][orig_par] = '_'+instrument
if par not in parameters.keys():
print 'Error: parameter '+orig_par+' not defined for instrument '+instrument+'. Exiting...'
sys.exit()
if par not in rv_params:
rv_params.append(par)
if parameters[par]['type'] == 'FIXED':
rv_params.pop(rv_params.index(par))
elif parameters[par]['type'] in prior_distributions:
if par not in parameters_to_check:
parameters_to_check.append(par)
else:
if parameters['K']['type'] == 'FIXED':
rv_params.pop(rv_params.index('K'))
elif parameters['K']['type'] in prior_distributions:
parameters_to_check.append('K')
for rvpar in ['sigma_w_rv','mu']:
if parameters[rvpar]['type'] in prior_distributions:
parameters_to_check.append(rvpar)
rv_params.append(rvpar)
elif parameters[rvpar]['type'] != 'FIXED':
rv_params.append(rvpar)
if options['MODE'] == 'transit':
all_mcmc_params = transit_params + common_params
elif options['MODE'] == 'rvs':
all_mcmc_params = rv_params + common_params
elif options['MODE'] == 'transit_noise':
all_mcmc_params = []
parameters_to_check = []
if options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'white':
noise_parameters = ['sigma_w']
        elif options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'flicker':
            noise_parameters = ['sigma_w','sigma_r']
        elif options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'GPExpSquaredKernel':
            noise_parameters = ['lnh','lnlambda','sigma_w']
elif options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'GPGranulation':
noise_parameters = ['lnomega','lnS','sigma_w']
elif options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'GPAsteroseismology':
noise_parameters = ['lnomega','lnS','lnQ','lnA','epsilon','lnW','lnnu','lnDeltanu','sigma_w']
for noise_param in noise_parameters:
all_mcmc_params.append(noise_param)
if parameters[noise_param]['type'] == 'FIXED':
all_mcmc_params.pop(all_mcmc_params.index(noise_param))
elif parameters[noise_param]['type'] in prior_distributions:
parameters_to_check.append(noise_param)
else:
all_mcmc_params = transit_params + rv_params + common_params
n_params = len(all_mcmc_params)
def normal_like(x,mu,tau):
return 0.5*(np.log(tau) - log2pi - tau*( (x-mu)**2))
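    # get_fn_likelihood: wavelet-based log-likelihood for residuals with a white (sigma_w)
    # plus 1/f^gamma (sigma_r) noise component, following Carter & Winn (2009): the
    # residuals are decomposed with a discrete wavelet transform and each coefficient is
    # treated as an independent Gaussian with a scale-dependent variance.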
def get_fn_likelihood(residuals, sigma_w, sigma_r, gamma=1.0):
like=0.0
        # Compute the discrete wavelet transform of the residuals:
aa,bb,M = Wavelets.getDWT(residuals)
# Calculate the g(gamma) factor used in Carter & Winn...
if(gamma==1.0):
g_gamma=1.0/(2.0*np.log(2.0)) # (value assuming gamma=1)
else:
g_gamma=(2.0)-(2.0)**gamma
        # log-Likelihood of the approximation coefficients
sigmasq_S=(sigma_r**2)*g_gamma+(sigma_w)**2
tau_a = 1.0/sigmasq_S
like += normal_like( bb[0], 0.0 , tau_a )
k=long(0)
SS=range(M)
for ii in SS:
# log-Likelihood of the detail coefficients with m=i...
if(ii==0):
sigmasq_W=(sigma_r**2)*(2.0**(-gamma*np.double(1.0)))+(sigma_w)**2
tau=1.0/sigmasq_W
like += normal_like( bb[1], 0.0, tau )
else:
sigmasq_W=(sigma_r**2)*(2.0**(-gamma*np.double(ii+1)))+(sigma_w)**2
tau=1.0/sigmasq_W
for j in range(2**ii):
like += normal_like( aa[k], 0.0 , tau )
k=k+1
return like
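    # get_sq_exp_likelihood: Gaussian-process log-likelihood with a squared-exponential
    # kernel (amplitude exp(lnh), length-scale exp(lnlambda)) computed with george; the
    # jitter sigma_w is added in quadrature to the reported errorbars.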
def get_sq_exp_likelihood(t,residuals,errors,sigma_w,lnh,lnlambda):
kernel = (np.exp(lnh)**2)*george.kernels.ExpSquaredKernel(np.exp(lnlambda)**2)
gp = george.GP(kernel,solver=george.HODLRSolver)
try:
gp.compute(t,np.sqrt(errors**2 + sigma_w**2))
except:
return -np.inf
return gp.lnlikelihood(residuals)
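    # get_granulation_likelihood: celerite log-likelihood using a single stochastically
    # driven, damped harmonic oscillator (SHO) term with Q fixed to 1/sqrt(2), plus a
    # jitter term with amplitude sigma_w.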
def get_granulation_likelihood(t,residuals,errors,sigma_w,lnomega,lnS):
bounds = dict(log_S0=(-1e15, 1e15), log_Q=(-1e15, 1e15), log_omega0=(-1e15, 1e15),log_sigma=(-1e15,1e15))
kernel = terms.SHOTerm(log_S0=lnS, log_Q=np.log(1./np.sqrt(2.)), log_omega0=lnomega,\
bounds=bounds)
kernel.freeze_parameter("log_Q")
kernel += terms.JitterTerm(log_sigma=np.log(sigma_w),\
bounds=bounds)
gp = celerite.GP(kernel, mean=np.mean(residuals))
try:
gp.compute(t,errors)
except:
return -np.inf
return gp.log_likelihood(residuals)
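    # get_asteroseismology_likelihood: celerite log-likelihood combining a granulation SHO
    # term with a comb of NASTEROSEISMOLOGY oscillator terms centred on the frequency
    # exp(lnnu), separated by exp(lnDeltanu), with powers modulated by a Gaussian envelope
    # of width exp(lnW), plus a jitter term with amplitude sigma_w.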
def get_asteroseismology_likelihood(t,residuals,errors,sigma_w,lnomega,lnS,lnQ,lnA,epsilon,\
lnW,lnnu,lnDeltanu,instrument):
bounds = dict(log_S0=(-1e15, 1e15), log_Q=(-1e15, 1e15), log_omega0=(-1e15, 1e15),log_sigma=(-1e15,1e15))
# First, the granulation noise component:
kernel = terms.SHOTerm(log_S0=lnS, log_Q=np.log(1./np.sqrt(2.)), log_omega0=lnomega,\
bounds=bounds)
kernel.freeze_parameter("log_Q")
# Next, the frequency kernels:
nu = np.exp(lnnu)
Deltanu = np.exp(lnDeltanu)
W = np.exp(lnW)
n = options['photometry'][instrument]["NASTEROSEISMOLOGY"]
for j in range(-(n-1)/2,(n-1)/2+1):
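            # Power of the j-th mode: Gaussian envelope (width W) in the frequency offset
            # from nu, scaled by exp(lnA - 2*lnQ); the corresponding angular frequency is
            # converted from muHz to rad/day below.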
lnSj = lnA - 2.*lnQ - (j*Deltanu+epsilon)**2/(2.*(W**2))
wj = 2.*np.pi*(nu+j*Deltanu+epsilon)*0.0864 # Last factor converts from muHz to 1/day (assuming t is in days)
if wj>0.:
kernel += terms.SHOTerm(log_S0=lnSj, log_Q=lnQ, log_omega0=np.log(wj),
bounds=bounds)
else:
return -np.inf
# Finally, a "jitter" term component for the photometric noise:
kernel += terms.JitterTerm(log_sigma=np.log(sigma_w),\
bounds=bounds)
# Set the GP:
gp = celerite.GP(kernel, mean=np.mean(residuals))
try:
gp.compute(t,errors)
lnlike = gp.log_likelihood(residuals)
except:
return -np.inf
# Return the likelihood:
if not np.isnan(lnlike):
return lnlike
else:
return -np.inf
def lnlike_transit_noise(gamma=1.0):
residuals = (yt-1.0)*1e6
if options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'flicker':
log_like = get_fn_likelihood(residuals,parameters['sigma_w']['object'].value,\
parameters['sigma_r']['object'].value)
elif options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'GPExpSquaredKernel':
log_like = get_sq_exp_likelihood(xt,residuals,yerrt*1e6,\
parameters['sigma_w']['object'].value,\
parameters['lnh']['object'].value,\
parameters['lnlambda']['object'].value)
elif options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'GPGranulation':
log_like = get_granulation_likelihood(xt,residuals,yerrt*1e6,\
parameters['sigma_w']['object'].value,\
parameters['lnomega']['object'].value,\
parameters['lnS']['object'].value)
elif options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'GPAsteroseismology':
log_like = get_asteroseismology_likelihood(xt,residuals,yerrt*1e6,\
parameters['sigma_w']['object'].value,\
parameters['lnomega']['object'].value,\
parameters['lnS']['object'].value,\
parameters['lnQ']['object'].value,\
parameters['lnA']['object'].value,\
parameters['epsilon']['object'].value,\
parameters['lnW']['object'].value,\
parameters['lnnu']['object'].value,\
parameters['lnDeltanu']['object'].value,\
the_instrument)
else:
taus = 1.0/((yerrt*1e6)**2 + (parameters['sigma_w']['object'].value)**2)
log_like = -0.5*(n_data_trs[0]*log2pi+np.sum(np.log(1./taus)+taus*(residuals**2)))
return log_like
def lnlike_transit(gamma=1.0):
if len(all_tr_instruments) == 1:
coeff1,coeff2 = reverse_ld_coeffs(options['photometry'][the_instrument]['LD_LAW'], \
parameters['q1']['object'].value,parameters['q2']['object'].value)
params[the_instrument].t0 = parameters['t0']['object'].value
params[the_instrument].per = parameters['P']['object'].value
params[the_instrument].rp = parameters['p']['object'].value
params[the_instrument].a = parameters['a']['object'].value
params[the_instrument].inc = parameters['inc']['object'].value
params[the_instrument].ecc = parameters['ecc']['object'].value
params[the_instrument].w = parameters['omega']['object'].value
params[the_instrument].u = [coeff1,coeff2]
model = m[the_instrument].light_curve(params[the_instrument])
if options['photometry'][the_instrument]['RESAMPLING']:
for i in range(len(idx_resampling[the_instrument])):
transit_flat[the_instrument][idx_resampling[the_instrument][i]] = \
np.mean(model[i*options['photometry'][the_instrument]['NRESAMPLING']:options['photometry'][the_instrument]['NRESAMPLING']*(i+1)])
residuals = (yt-transit_flat[the_instrument])*1e6
else:
residuals = (yt-model)*1e6
if options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'flicker':
log_like = get_fn_likelihood(residuals,parameters['sigma_w']['object'].value,\
parameters['sigma_r']['object'].value)
elif options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'GPExpSquaredKernel':
log_like = get_sq_exp_likelihood(xt,residuals,yerrt*1e6,\
parameters['sigma_w']['object'].value,\
parameters['lnh']['object'].value,\
parameters['lnlambda']['object'].value)
elif options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'GPGranulation':
log_like = get_granulation_likelihood(xt,residuals,yerrt*1e6,\
parameters['sigma_w']['object'].value,\
parameters['lnomega']['object'].value,\
parameters['lnS']['object'].value)
elif options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'GPAsteroseismology':
log_like = get_asteroseismology_likelihood(xt,residuals,yerrt*1e6,\
parameters['sigma_w']['object'].value,\
parameters['lnomega']['object'].value,\
parameters['lnS']['object'].value,\
parameters['lnQ']['object'].value,\
parameters['lnA']['object'].value,\
parameters['epsilon']['object'].value,\
parameters['lnW']['object'].value,\
parameters['lnnu']['object'].value,\
parameters['lnDeltanu']['object'].value,\
the_instrument)
else:
taus = 1.0/((yerrt*1e6)**2 + (parameters['sigma_w']['object'].value)**2)
log_like = -0.5*(n_data_trs[0]*log2pi+np.sum(np.log(1./taus)+taus*(residuals**2)))
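            # If a stellar-density prior was provided, add a Gaussian penalty comparing it
            # to the density implied by the transit, rho_* = (3*pi/(G*P^2))*(a/R_*)^3
            # (with P converted to seconds):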
if 'stellardensity' in options.keys():
sd_mean = options['stellardensity']['mean']
sd_sigma = options['stellardensity']['sigma']
#print 'val:',sd_mean,sd_sigma
model = ((3.*np.pi)/(G*(parameters['P']['object'].value*(24.*3600.0))**2))*(parameters['a']['object'].value)**3
#print 'model:',model
log_like = log_like - 0.5*(log2pi + 2.*np.log(sd_sigma) + ((model-sd_mean)/sd_sigma)**2)
#print 'Median residuals:',np.median(residuals)
#print 'Transit log-like:',log_like
return log_like
else:
log_like = 0.0
for k in range(len(all_tr_instruments)):
instrument = all_tr_instruments[k]
coeff1,coeff2 = reverse_ld_coeffs(options['photometry'][instrument]['LD_LAW'], \
parameters['q1'+sufix[instrument]['q1']]['object'].value,\
parameters['q2'+sufix[instrument]['q2']]['object'].value)
params[instrument].t0 = parameters['t0'+sufix[instrument]['t0']]['object'].value
params[instrument].per = parameters['P']['object'].value
params[instrument].rp = parameters['p'+sufix[instrument]['p']]['object'].value
params[instrument].a = parameters['a'+sufix[instrument]['a']]['object'].value
params[instrument].inc = parameters['inc']['object'].value
params[instrument].ecc = parameters['ecc']['object'].value
params[instrument].w = parameters['omega']['object'].value
params[instrument].u = [coeff1,coeff2]
model = m[instrument].light_curve(params[instrument])
if options['photometry'][instrument]['RESAMPLING']:
for i in range(len(idx_resampling[instrument])):
transit_flat[instrument][idx_resampling[instrument][i]] = \
np.mean(model[i*options['photometry'][instrument]['NRESAMPLING']:options['photometry'][instrument]['NRESAMPLING']*(i+1)])
residuals = (yt[all_tr_instruments_idxs[k]]-transit_flat[instrument])*1e6
else:
residuals = (yt[all_tr_instruments_idxs[k]]-model)*1e6
if options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'flicker':
log_like = log_like + get_fn_likelihood(residuals,parameters['sigma_w'+sufix[instrument]['sigma_w']]['object'].value,\
parameters['sigma_r'+sufix[instrument]['sigma_r']]['object'].value)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPExpSquaredKernel':
log_like = log_like + get_sq_exp_likelihood(xt[all_tr_instruments_idxs[k]],residuals,yerrt[all_tr_instruments_idxs[k]]*1e6,\
parameters['sigma_w'+sufix[instrument]['sigma_w']]['object'].value,\
parameters['lnh'+sufix[instrument]['lnh']]['object'].value,\
parameters['lnlambda'+sufix[instrument]['lnlambda']]['object'].value)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPGranulation':
log_like = log_like + get_granulation_likelihood(xt[all_tr_instruments_idxs[k]],residuals,yerrt[all_tr_instruments_idxs[k]]*1e6,\
parameters['sigma_w'+sufix[instrument]['sigma_w']]['object'].value,\
parameters['lnomega'+sufix[instrument]['lnomega']]['object'].value,\
parameters['lnS'+sufix[instrument]['lnS']]['object'].value)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPAsteroseismology':
log_like = log_like + get_asteroseismology_likelihood(xt[all_tr_instruments_idxs[k]],residuals,yerrt[all_tr_instruments_idxs[k]]*1e6,\
parameters['sigma_w'+sufix[instrument]['sigma_w']]['object'].value,\
parameters['lnomega'+sufix[instrument]['lnomega']]['object'].value,\
parameters['lnS'+sufix[instrument]['lnS']]['object'].value,\
parameters['lnQ'+sufix[instrument]['lnQ']]['object'].value,\
parameters['lnA'+sufix[instrument]['lnA']]['object'].value,\
parameters['epsilon'+sufix[instrument]['epsilon']]['object'].value,\
parameters['lnW'+sufix[instrument]['lnW']]['object'].value,\
parameters['lnnu'+sufix[instrument]['lnnu']]['object'].value,\
parameters['lnDeltanu'+sufix[instrument]['lnDeltanu']]['object'].value,\
instrument)
else:
taus = 1.0/((yerrt[all_tr_instruments_idxs[k]]*1e6)**2 + (parameters['sigma_w'+sufix[instrument]['sigma_w']]['object'].value)**2)
log_like = log_like - 0.5*(n_data_trs[k]*log2pi+np.sum(np.log(1./taus)+taus*(residuals**2)))
if 'stellardensity' in options.keys():
sd_mean = options['stellardensity']['mean']
sd_sigma = options['stellardensity']['sigma']
model = ((3.*np.pi)/(G*(parameters['P']['object'].value*(24.*3600.0))**2))*(parameters['a'+sufix[instrument]['a']]['object'].value)**3
log_like = log_like - 0.5*(log2pi + 2.*np.log(sd_sigma) + ((model-sd_mean)/sd_sigma)**2)
return log_like
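    # lnlike_rv: Gaussian log-likelihood of the radial velocities against a radvel Keplerian,
    # with a per-instrument systemic velocity (mu) and a white-noise jitter (sigma_w_rv)
    # added in quadrature to the reported errors.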
def lnlike_rv():
#print 'RVs:'
#print 'mu',parameters['mu']['object'].value
#print 'K',parameters['K']['object'].value
#print 'ecc',parameters['ecc']['object'].value
if len(all_rv_instruments) == 1:
radvel_params['per1'] = radvel.Parameter(value=parameters['P']['object'].value)
radvel_params['tc1'] = radvel.Parameter(value=parameters['t0']['object'].value)
radvel_params['w1'] = radvel.Parameter(value=parameters['omega']['object'].value*np.pi/180.)
radvel_params['e1'] = radvel.Parameter(value=parameters['ecc']['object'].value)
radvel_params['k1'] = radvel.Parameter(value=parameters['K']['object'].value)
model = parameters['mu']['object'].value + radvel.model.RVModel(radvel_params).__call__(xrv)
residuals = (yrv-model)
#print 'Median residuals:',np.median(residuals)
taus = 1.0/((yerrrv)**2 + (parameters['sigma_w_rv']['object'].value)**2)
log_like = -0.5*(n_data_rvs[0]*log2pi+np.sum(np.log(1./taus)+taus*(residuals**2)))
#print 'RV log-like:',log_like
return log_like
else:
log_like = 0.0
for i in range(len(all_rv_instruments)):
radvel_params['per1'] = radvel.Parameter(value=parameters['P']['object'].value)
radvel_params['tc1'] = radvel.Parameter(value=parameters['t0']['object'].value)
radvel_params['w1'] = radvel.Parameter(value=parameters['omega']['object'].value*np.pi/180.)
radvel_params['e1'] = radvel.Parameter(value=parameters['ecc']['object'].value)
radvel_params['k1'] = radvel.Parameter(value=parameters['K']['object'].value)
model = parameters['mu'+sufix[all_rv_instruments[i]]['mu']]['object'].value + \
radvel.model.RVModel(radvel_params).__call__(xrv[all_rv_instruments_idxs[i]])
residuals = (yrv[all_rv_instruments_idxs[i]]-model)
taus = 1.0/((yerrrv[all_rv_instruments_idxs[i]])**2 + (parameters['sigma_w_rv'+sufix[all_rv_instruments[i]]['sigma_w_rv']]['object'].value)**2)
log_like = log_like -0.5*(n_data_rvs[i]*log2pi+np.sum(np.log(1./taus)+taus*(residuals**2)))
return log_like
def lnprior(theta):
# Read in the values of the parameter vector and update values of the objects.
# For each one, if everything is ok, get the total prior, which is the sum
# of the independant priors for each parameter:
total_prior = 0.0
for i in range(n_params):
c_param = all_mcmc_params[i]
parameters[c_param]['object'].set_value(theta[i])
if c_param in parameters_to_check:
if not parameters[c_param]['object'].check_value(theta[i]):
return -np.inf
total_prior += parameters[c_param]['object'].get_ln_prior()
return total_prior
def lnprob_full(theta):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
lnrv = lnlike_rv()
return lp + lnrv + lnlike_transit()
def lnprob_transit(theta):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike_transit()
def lnprob_transit_noise(theta):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike_transit_noise()
def lnprob_rv(theta):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike_rv()
# Define the posterior to use:
if options['MODE'] == 'full':
lnprob = lnprob_full
elif options['MODE'] == 'transit':
lnprob = lnprob_transit
elif options['MODE'] == 'transit_noise':
lnprob = lnprob_transit_noise
elif options['MODE'] == 'rvs':
lnprob = lnprob_rv
else:
print 'Mode not supported. Doing nothing.'
# If already not done, get posterior samples:
if len(parameters[all_mcmc_params[0]]['object'].posterior) == 0:
# Make a first MCMC run to search for optimal parameter values
# in (almost) all the parameter space defined by the priors if
# no initial guess is given:
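        # The scheme below is two-stage: a short exploratory run with 200 walkers locates
        # the high-probability region, and the final run re-initializes options['NWALKERS']
        # walkers in a tight ball around the medians of that exploratory run.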
ndim = n_params
pos = []
for j in range(200):
while True:
theta_vector = np.array([])
for i in range(n_params):
current_parameter = all_mcmc_params[i]
# If parameter has a guess, sample a value from prior distribution, multiply it by 1e-3 and
# add it to the real value (this is just to have the walkers move around a sphere around the
# guess with orders of magnitude defined by the prior). If no initial guess, sample from the
# prior:
if parameters[current_parameter]['object'].has_guess:
theta_vector = np.append(theta_vector,parameters[current_parameter]['object'].init_value + \
(parameters[current_parameter]['object'].init_value-\
parameters[current_parameter]['object'].sample())*1e-3)
else:
theta_vector = np.append(theta_vector,parameters[current_parameter]['object'].sample())
                try:
                    val = lnprob(theta_vector)
                except:
                    val = -np.inf
if np.isfinite(val):
break
pos.append(theta_vector)
        # Run the sampler for a bit (200 walkers, 200 steps) to explore the parameter space:
print '\t Starting first iteration run...'
sampler = emcee.EnsembleSampler(200, ndim, lnprob)
sampler.run_mcmc(pos, 200)
# Now sample the walkers around the values found in previous iteration:
pos = []
first_time = True
init_vals = np.zeros(n_params)
init_vals_sigma = np.zeros(n_params)
for j in range(options['NWALKERS']):
while True:
theta_vector = np.array([])
for i in range(n_params):
if first_time:
c_p_chain = np.array([])
for walker in range(200):
c_p_chain = np.append(c_p_chain,sampler.chain[walker,100:,i])
init_vals[i] = np.median(c_p_chain)
init_vals_sigma[i] = get_sigma(c_p_chain,np.median(c_p_chain))
current_parameter = all_mcmc_params[i]
# Put the walkers around a small gaussian sphere centered on the best value
# found in previous iteration. Walkers will run away from sphere eventually:
theta_vector = np.append(theta_vector,np.random.normal(init_vals[i],\
init_vals_sigma[i]*1e-3))
if first_time:
first_time = False
try:
val = lnprob(theta_vector)
except:
val = np.inf
if np.isfinite(val):
break
pos.append(theta_vector)
# Run the (final) MCMC:
print '\t Done! Starting MCMC...'
sampler = emcee.EnsembleSampler(options['NWALKERS'], ndim, lnprob)
sampler.run_mcmc(pos, options['NJUMPS']+options['NBURNIN'])
print '\t Done! Saving...'
# Save the parameter chains for the parameters that were actually varied:
for i in range(n_params):
c_param = all_mcmc_params[i]
c_p_chain = np.array([])
for walker in range(options['NWALKERS']):
c_p_chain = np.append(c_p_chain,sampler.chain[walker,options['NBURNIN']:,i])
parameters[c_param]['object'].set_posterior(np.copy(c_p_chain))
# When done or if MCMC already performed, save results:
initial_values = {}
for i in range(len(all_mcmc_params)):
initial_values[all_mcmc_params[i]] = parameters[all_mcmc_params[i]]['object'].value
import matplotlib.pyplot as plt
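# plot_transit_and_rv: re-builds the best-fit transit and/or radial-velocity models from the
# fitted parameters, plots phased data, models and residuals, writes the corresponding
# model/data/residual files to the results/ output directory and prints the final
# log-likelihoods. Its setup mirrors that of the fitting routine above.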
def plot_transit_and_rv(times, relative_flux, error, tr_instruments, times_rv, rv, rv_err, rv_instruments,\
parameters, idx_resampling, options, texp = 0.020434):
# Generate out_dir folder name (for saving residuals, models, etc.):
mode = options['MODE']
target = options['TARGET']
fname = target+'_'+mode+'_'
if mode != 'rvs':
for instrument in options['photometry'].keys():
fname = fname + instrument +'_'+options['photometry'][instrument]['PHOT_NOISE_MODEL']+\
'_'+options['photometry'][instrument]['LD_LAW']+'_'
out_dir = 'results/'+fname[:-1]+'/'
plt.title('exonailer final fit + data')
# If mode is not RV:
if options['MODE'] != 'rvs':
params = {}
m = {}
t_resampling = {}
transit_flat = {}
# Count instruments:
all_tr_instruments,all_tr_instruments_idxs,n_data_trs = count_instruments(tr_instruments)
# Prepare data for batman:
xt = times.astype('float64')
yt = relative_flux.astype('float64')
yerrt = error.astype('float64')
for k in range(len(all_tr_instruments)):
instrument = all_tr_instruments[k]
params[instrument],m[instrument] = init_batman(xt[all_tr_instruments_idxs[k]],\
law=options['photometry'][instrument]['LD_LAW'])
# Initialize the parameters of the transit model,
# and prepare resampling data if resampling is True:
if options['photometry'][instrument]['RESAMPLING']:
t_resampling[instrument] = np.array([])
for i in range(len(idx_resampling[instrument])):
tij = np.zeros(options['photometry'][instrument]['NRESAMPLING'])
for j in range(1,options['photometry'][instrument]['NRESAMPLING']+1):
# Eq (35) in Kipping (2010)
tij[j-1] = xt[all_tr_instruments_idxs[k]][idx_resampling[instrument][i]] + ((j - \
((options['photometry'][instrument]['NRESAMPLING']+1)/2.))*(options['photometry'][instrument]['TEXP']/np.double(\
options['photometry'][instrument]['NRESAMPLING'])))
t_resampling[instrument] = np.append(t_resampling[instrument], np.copy(tij))
params[instrument],m[instrument] = init_batman(t_resampling[instrument],\
law=options['photometry'][instrument]['LD_LAW'])
transit_flat[instrument] = np.ones(len(xt[all_tr_instruments_idxs[k]]))
transit_flat[instrument][idx_resampling[instrument]] = np.zeros(len(idx_resampling[instrument]))
# Initialize the variable names:
if options['MODE'] != 'rvs':
if len(all_tr_instruments)>1:
transit_params = ['P','inc']
else:
the_instrument = options['photometry'].keys()[0]
            transit_params = ['P','inc','t0','a','p','sigma_w','sigma_r','q1','q2']
common_params = ['ecc','omega']
else:
common_params = ['ecc','omega','P','t0']
# If mode is not transit, prepare the data too:
if 'transit' not in options['MODE']:
xrv = times_rv.astype('float64')
yrv = rv.astype('float64')
if rv_err is None:
yerrrv = 0.0
else:
yerrrv = rv_err.astype('float64')
all_rv_instruments,all_rv_instruments_idxs,n_data_rvs = count_instruments(rv_instruments)
rv_params = ['K']
#if len(all_rv_instruments)>1:
# for instrument in all_rv_instruments:
# rv_params.append('mu_'+instrument)
# rv_params.append('sigma_w_rv_'+instrument)
#else:
# rv_params.append('mu')
# rv_params.append('sigma_w_rv')
# Create lists that will save parameters to check the limits on:
parameters_to_check = []
# Check common parameters:
if parameters['ecc']['type'] == 'FIXED':
common_params.pop(common_params.index('ecc'))
elif parameters['ecc']['type'] in prior_distributions:
parameters_to_check.append('ecc')
if parameters['omega']['type'] == 'FIXED':
common_params.pop(common_params.index('omega'))
elif parameters['omega']['type'] in prior_distributions:
parameters_to_check.append('omega')
# Eliminate from the parameter list parameters that are being fixed:
# First, generate a sufix dictionary, which will add the sufix _instrument to
# each instrument in the MCMC, in order to keep track of the parameters that
# are being held constant between instruments and those that vary with instrument:
sufix = {}
if options['MODE'] != 'rvs':
if len(all_tr_instruments)>1:
# First, generate a sufix dictionary, which will add the sufix _instrument to
# each instrument in the MCMC, in order to keep track of the parameters that
# are being held constant between instruments and those that vary with instrument:
sufix = {}
# Check parameters that always will be constant amongst transits:
for par in ['P','inc']:
if parameters[par]['type'] == 'FIXED':
transit_params.pop(transit_params.index(par))
elif parameters[par]['type'] in prior_distributions:
parameters_to_check.append(par)
# Now check parameters that might change between instruments:
for i in range(len(all_tr_instruments)):
instrument = all_tr_instruments[i]
sufix[instrument] = {}
for par in ['t0','a','p','sigma_w','q1','q2']:
orig_par = par
sufix[instrument][orig_par] = ''
if par not in parameters.keys():
par = par+'_'+instrument
sufix[instrument][orig_par] = '_'+instrument
if par not in parameters.keys():
print 'Error: parameter '+orig_par+' not defined. Exiting...'
sys.exit()
if par not in transit_params:
transit_params.append(par)
if parameters[par]['type'] == 'FIXED':
transit_params.pop(transit_params.index(par))
elif parameters[par]['type'] in prior_distributions:
parameters_to_check.append(par)
if options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'flicker':
for noise_param in ['sigma_r']:
transit_params.append(noise_param+'_'+instrument)
if parameters[noise_param+'_'+instrument]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param+'_'+instrument))
elif parameters[noise_param+'_'+instrument]['type'] in prior_distributions:
parameters_to_check.append(noise_param+'_'+instrument)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPExpSquaredKernel':
for noise_param in ['lnh','lnlambda']:
transit_params.append(noise_param+'_'+instrument)
if parameters[noise_param+'_'+instrument]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param+'_'+instrument))
elif parameters[noise_param+'_'+instrument]['type'] in prior_distributions:
parameters_to_check.append(noise_param+'_'+instrument)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPGranulation':
for noise_param in ['lnomega','lnS']:
transit_params.append(noise_param+'_'+instrument)
if parameters[noise_param+'_'+instrument]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param+'_'+instrument))
elif parameters[noise_param+'_'+instrument]['type'] in prior_distributions:
parameters_to_check.append(noise_param+'_'+instrument)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPAsteroseismology':
for noise_param in ['lnomega','lnS','lnQ','lnA','epsilon','lnW','lnnu','lnDeltanu']:
transit_params.append(noise_param+'_'+instrument)
if parameters[noise_param+'_'+instrument]['type'] == 'FIXED':
transit_params.pop(transit_params.index(noise_param+'_'+instrument))
elif parameters[noise_param+'_'+instrument]['type'] in prior_distributions:
parameters_to_check.append(noise_param+'_'+instrument)
else:
for par in ['P','t0','a','p','inc','sigma_w','q1','q2']:
if parameters[par]['type'] == 'FIXED':
transit_params.pop(transit_params.index(par))
elif parameters[par]['type'] in prior_distributions:
parameters_to_check.append(par)
if options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'flicker':
if parameters['sigma_r']['type'] == 'FIXED':
transit_params.pop(transit_params.index('sigma_r'))
elif parameters['sigma_r']['type'] in prior_distributions:
parameters_to_check.append('sigma_r')
elif options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'GPExpSquaredKernel':
transit_params.pop(transit_params.index('sigma_r'))
for noise_param in ['lnh','lnlambda']:
transit_params.append(noise_param)
elif options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'GPGranulation':
transit_params.pop(transit_params.index('sigma_r'))
for noise_param in ['lnomega','lnS']:
transit_params.append(noise_param)
elif options['photometry'][options['photometry'].keys()[0]]['PHOT_NOISE_MODEL'] == 'GPAsteroseismology':
transit_params.pop(transit_params.index('sigma_r'))
for noise_param in ['lnomega','lnS','lnQ','lnA','epsilon','lnW','lnnu','lnDeltanu']:
transit_params.append(noise_param)
else:
transit_params.pop(transit_params.index('sigma_r'))
if options['MODE'] != 'transit':
if parameters['K']['type'] == 'FIXED':
rv_params.pop(rv_params.index('K'))
elif parameters['K']['type'] in prior_distributions:
parameters_to_check.append('K')
if len(all_rv_instruments)>1:
sigma_w_rv = {}
for instrument in all_rv_instruments:
sufix[instrument] = {}
for par in ['mu','sigma_w_rv']:
orig_par = par
sufix[instrument][orig_par] = ''
if par not in parameters.keys():
par = par+'_'+instrument
sufix[instrument][orig_par] = '_'+instrument
if par not in parameters.keys():
print 'Error: parameter '+orig_par+' not defined. Exiting...'
sys.exit()
if par not in rv_params:
rv_params.append(par)
if parameters[par]['type'] == 'FIXED':
rv_params.pop(rv_params.index(par))
elif parameters[par]['type'] in prior_distributions:
parameters_to_check.append(par)
#sigma_w_rv = {}
#for instrument in all_rv_instruments:
# if parameters['mu_'+instrument]['type'] == 'FIXED':
# rv_params.pop(rv_params.index('mu_'+instrument))
# elif parameters['mu_'+instrument]['type'] in prior_distributions:
# parameters_to_check.append('mu_'+instrument)
# if parameters['sigma_w_rv_'+instrument]['type'] == 'FIXED':
# rv_params.pop(rv_params.index('sigma_w_rv_'+instrument))
# elif parameters['sigma_w_rv_'+instrument]['type'] in prior_distributions:
# parameters_to_check.append('sigma_w_rv_'+instrument)
# else:
# sigma_w_rv[instrument] = 0.0
# rv_params.pop(rv_params.index('sigma_w_rv_'+instrument))
else:
if parameters['K']['type'] == 'FIXED':
rv_params.pop(rv_params.index('K'))
elif parameters['K']['type'] in prior_distributions:
parameters_to_check.append('K')
for rvpar in ['sigma_w_rv','mu']:
if parameters[rvpar]['type'] in prior_distributions:
parameters_to_check.append(rvpar)
rv_params.append(rvpar)
elif parameters[rvpar]['type'] != 'FIXED':
rv_params.append(rvpar)
if options['MODE'] == 'transit':
all_mcmc_params = transit_params + common_params
elif options['MODE'] == 'rvs':
all_mcmc_params = rv_params + common_params
elif options['MODE'] == 'transit_noise':
all_mcmc_params = ['sigma_w','sigma_r']
else:
all_mcmc_params = transit_params + rv_params + common_params
# First, generate plot with gridspec according to the number of
# instruments used for transits:
if options['MODE'] == 'full':
nrows = 4
ncols = len(all_tr_instruments)
gridspec.GridSpec(nrows,len(all_tr_instruments))
elif options['MODE'] == 'transit':
nrows = 1
ncols = len(all_tr_instruments)
elif options['MODE'] == 'rvs':
nrows = 3
ncols = 1
all_tr_instruments = ['noinst']
gridspec.GridSpec(nrows,len(all_tr_instruments))
if options['MODE'] != 'rvs':
gridspec.GridSpec(nrows,len(all_tr_instruments))
# Plot transits:
if len(all_tr_instruments) == 1:
plt.subplot2grid((nrows,ncols),(0,0),colspan=2)
coeff1,coeff2 = reverse_ld_coeffs(options['photometry'][the_instrument]['LD_LAW'], \
parameters['q1']['object'].value,parameters['q2']['object'].value)
params[the_instrument].t0 = parameters['t0']['object'].value
params[the_instrument].per = parameters['P']['object'].value
params[the_instrument].rp = parameters['p']['object'].value
params[the_instrument].a = parameters['a']['object'].value
params[the_instrument].inc = parameters['inc']['object'].value
params[the_instrument].ecc = parameters['ecc']['object'].value
params[the_instrument].w = parameters['omega']['object'].value
params[the_instrument].u = [coeff1,coeff2]
model = m[the_instrument].light_curve(params[the_instrument])
model_t = np.linspace(np.min(xt),np.max(xt),len(xt)*100)#4)
model_phase = get_phases(model_t,params[the_instrument].per,params[the_instrument].t0)
phase = get_phases(xt,params[the_instrument].per,params[the_instrument].t0)
if options['photometry'][the_instrument]['RESAMPLING']:
# Generate residuals for plot:
for i in range(len(idx_resampling[the_instrument])):
transit_flat[the_instrument][idx_resampling[the_instrument][i]] = \
np.mean(model[i*options['photometry'][the_instrument]['NRESAMPLING']:options['photometry'][the_instrument]['NRESAMPLING']*(i+1)])
residuals = (yt-transit_flat[the_instrument])
# Now model (resampled) transit:
idx_resampling_pred = np.where((model_phase>-options['photometry'][the_instrument]['PHASE_MAX_RESAMPLING'])&\
(model_phase<options['photometry'][the_instrument]['PHASE_MAX_RESAMPLING']))[0]
t_resampling_pred = np.array([])
for i in range(len(idx_resampling_pred)):
tij = np.zeros(options['photometry'][the_instrument]['NRESAMPLING'])
for j in range(1,options['photometry'][the_instrument]['NRESAMPLING']+1):
tij[j-1] = model_t[idx_resampling_pred[i]] + ((j - ((options['photometry'][the_instrument]['NRESAMPLING']+1)/2.))*(options['photometry'][the_instrument]['TEXP']/\
np.double(options['photometry'][the_instrument]['NRESAMPLING'])))
t_resampling_pred = np.append(t_resampling_pred, np.copy(tij))
params2,m2 = init_batman(t_resampling_pred, law=options['photometry'][the_instrument]['LD_LAW'])
transit_flat_pred = np.ones(len(model_t))
transit_flat_pred[idx_resampling_pred] = np.zeros(len(idx_resampling_pred))
model = m2.light_curve(params[the_instrument])
for i in range(len(idx_resampling_pred)):
transit_flat_pred[idx_resampling_pred[i]] = \
np.mean(model[i*options['photometry'][the_instrument]['NRESAMPLING']:options['photometry'][the_instrument]['NRESAMPLING']*(i+1)])
model = transit_flat_pred
else:
residuals = (yt-model)
params2,m2 = init_batman(model_t, law=options['photometry'][the_instrument]['LD_LAW'])
model = m2.light_curve(params[the_instrument])
idx_phase = np.argsort(phase)
idx_model_phase = np.argsort(model_phase)
plt.plot(phase[idx_phase],yt[idx_phase],'.',color='black',alpha=0.4)
plt.plot(model_phase[idx_model_phase],model[idx_model_phase],'r-')
sigma = get_sigma(residuals[idx_phase],0.0)
plt.plot(phase[idx_phase],residuals[idx_phase]+(1-1.8*(parameters['p']['object'].value**2))-10*sigma,'.',color='black',alpha=0.4)
plt.title(the_instrument)
plt.ylabel('Relative flux')
plt.xlabel('Phase')
# Save phased model, data and residuals for the transit:
fout_model = open(out_dir+'tr_model.dat','w')
for i in idx_model_phase:
fout_model.write('{0:.10f} {1:.10f}\n'.format(model_phase[i],model[i]))
fout_model.close()
fout_data = open(out_dir+'tr_data.dat','w')
for i in range(len(idx_phase)):
fout_data.write('{0:.10f} {1:.10f} {2:.10f}\n'.format(xt[i],phase[i],yt[i]))
fout_data.close()
fout_res = open(out_dir+'tr_residuals.dat','w')
for i in range(len(idx_phase)):
fout_res.write('{0:.10f} {1:.10f} {2:.10f}\n'.format(xt[i],phase[i],residuals[i]))
fout_res.close()
# Get log-likelihood for transit fit:
if options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'flicker':
log_like = exonailer_mcmc_fit.get_fn_likelihood(residuals*1e6,parameters['sigma_w']['object'].value,\
parameters['sigma_r']['object'].value)
elif options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'GPExpSquaredKernel':
log_like = exonailer_mcmc_fit.get_sq_exp_likelihood(xt,residuals*1e6,yerrt*1e6,\
parameters['sigma_w']['object'].value,\
parameters['lnh']['object'].value,\
parameters['lnlambda']['object'].value)
elif options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'GPGranulation':
log_like = exonailer_mcmc_fit.get_granulation_likelihood(xt,residuals*1e6,yerrt*1e6,\
parameters['sigma_w']['object'].value,\
parameters['lnomega']['object'].value,\
parameters['lnS']['object'].value)
elif options['photometry'][the_instrument]['PHOT_NOISE_MODEL'] == 'GPAsteroseismology':
log_like = exonailer_mcmc_fit.get_asteroseismology_likelihood(xt,residuals*1e6,yerrt*1e6,\
parameters['sigma_w']['object'].value,\
parameters['lnomega']['object'].value,\
parameters['lnS']['object'].value,\
parameters['lnQ']['object'].value,\
parameters['lnA']['object'].value,\
parameters['epsilon']['object'].value,\
parameters['lnW']['object'].value,\
parameters['lnnu']['object'].value,\
parameters['lnDeltanu']['object'].value,\
the_instrument)
else:
taus = 1.0/((yerrt*1e6)**2 + (parameters['sigma_w']['object'].value)**2)
log_like = -0.5*(n_data_trs[0]*log2pi+np.sum(np.log(1./taus)+taus*((residuals*1e6)**2)))
print '\t Log-likelihood for transit fit:',log_like
else:
#sufix[instrument][orig_par]
log_like = 0.0
for k in range(len(all_tr_instruments)):
plt.subplot2grid((nrows,ncols),(0,k))
if k == 0:
plt.ylabel('Relative flux')
#plt.xlabel('Phase')
instrument = all_tr_instruments[k]
coeff1,coeff2 = reverse_ld_coeffs(options['photometry'][instrument]['LD_LAW'], \
parameters['q1'+sufix[instrument]['q1']]['object'].value,\
parameters['q2'+sufix[instrument]['q2']]['object'].value)
params[instrument].t0 = parameters['t0'+sufix[instrument]['t0']]['object'].value
params[instrument].per = parameters['P']['object'].value
params[instrument].rp = parameters['p'+sufix[instrument]['p']]['object'].value
params[instrument].a = parameters['a'+sufix[instrument]['a']]['object'].value
params[instrument].inc = parameters['inc']['object'].value
params[instrument].ecc = parameters['ecc']['object'].value
params[instrument].w = parameters['omega']['object'].value
params[instrument].u = [coeff1,coeff2]
model = m[instrument].light_curve(params[instrument])
model_t = np.linspace(np.min(xt[all_tr_instruments_idxs[k]]),np.max(xt[all_tr_instruments_idxs[k]]),len(all_tr_instruments_idxs[k])*4)
model_phase = get_phases(model_t,params[instrument].per,params[instrument].t0)
phase = get_phases(xt[all_tr_instruments_idxs[k]],params[instrument].per,params[instrument].t0)
if options['photometry'][instrument]['RESAMPLING']:
for i in range(len(idx_resampling[instrument])):
transit_flat[instrument][idx_resampling[instrument][i]] = \
np.mean(model[i*options['photometry'][instrument]['NRESAMPLING']:options['photometry'][instrument]['NRESAMPLING']*(i+1)])
residuals = (yt[all_tr_instruments_idxs[k]]-transit_flat[instrument])*1e6
idx_resampling_pred = np.where((model_phase>-options['photometry'][instrument]['PHASE_MAX_RESAMPLING'])&\
(model_phase<options['photometry'][instrument]['PHASE_MAX_RESAMPLING']))[0]
t_resampling_pred = np.array([])
for i in range(len(idx_resampling_pred)):
tij = np.zeros(options['photometry'][instrument]['NRESAMPLING'])
for j in range(1,options['photometry'][instrument]['NRESAMPLING']+1):
tij[j-1] = model_t[idx_resampling_pred[i]] + ((j - ((options['photometry'][instrument]['NRESAMPLING']+1)/2.))*(options['photometry'][instrument]['TEXP']/\
np.double(options['photometry'][instrument]['NRESAMPLING'])))
t_resampling_pred = np.append(t_resampling_pred, np.copy(tij))
params2,m2 = init_batman(t_resampling_pred, law=options['photometry'][instrument]['LD_LAW'])
transit_flat_pred = np.ones(len(model_t))
transit_flat_pred[idx_resampling_pred] = np.zeros(len(idx_resampling_pred))
model = m2.light_curve(params[instrument])
for i in range(len(idx_resampling_pred)):
transit_flat_pred[idx_resampling_pred[i]] = np.mean(model[i*options['photometry'][instrument]['NRESAMPLING']:\
options['photometry'][instrument]['NRESAMPLING']*(i+1)])
model = transit_flat_pred
else:
residuals = (yt[all_tr_instruments_idxs[k]]-model)*1e6
params2,m2 = init_batman(model_t, law=options['photometry'][instrument]['LD_LAW'])
model = m2.light_curve(params[instrument])
idx_phase = np.argsort(phase)
idx_model_phase = np.argsort(model_phase)
plt.plot(phase[idx_phase],yt[all_tr_instruments_idxs[k]][idx_phase],'.',color='black',alpha=0.4)
plt.plot(model_phase[idx_model_phase],model[idx_model_phase],'r-')
sigma = get_sigma(residuals[idx_phase]*1e-6,0.0)
plt.plot(phase[idx_phase],residuals[idx_phase]*1e-6+(1-1.8*(parameters['p'+sufix[instrument]['p']]['object'].value**2))-3*sigma,'.',color='black',alpha=0.4)
plt.title(instrument)
# Save phased model, data and residuals for the transit:
fout_model = open(out_dir+'tr_model_'+instrument+'.dat','w')
for i in idx_model_phase:
fout_model.write('{0:.10f} {1:.10f}\n'.format(model_phase[i],model[i]))
fout_model.close()
fout_data = open(out_dir+'tr_data_'+instrument+'.dat','w')
for i in range(len(idx_phase)):
fout_data.write('{0:.10f} {1:.10f}\n'.format(phase[i],yt[i]))
fout_data.close()
fout_res = open(out_dir+'tr_residuals_'+instrument+'.dat','w')
for i in range(len(idx_phase)):
fout_res.write('{0:.10f} {1:.10f}\n'.format(phase[i],residuals[i]))
fout_res.close()
# Get log-likelihood for transit fit(s):
if options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'flicker':
log_like = log_like + get_fn_likelihood(residuals*1e6,parameters['sigma_w'+sufix[instrument]['sigma_w']]['object'].value,\
parameters['sigma_r'+sufix[instrument]['sigma_r']]['object'].value)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPExpSquaredKernel':
log_like = log_like + get_sq_exp_likelihood(xt[all_tr_instruments_idxs[k]],residuals*1e6,yerrt[all_tr_instruments_idxs[k]]*1e6,\
parameters['sigma_w'+sufix[instrument]['sigma_w']]['object'].value,\
parameters['lnh'+sufix[instrument]['lnh']]['object'].value,\
parameters['lnlambda'+sufix[instrument]['lnlambda']]['object'].value)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPGranulation':
log_like = log_like + get_granulation_likelihood(xt[all_tr_instruments_idxs[k]],residuals*1e6,yerrt[all_tr_instruments_idxs[k]]*1e6,\
parameters['sigma_w'+sufix[instrument]['sigma_w']]['object'].value,\
parameters['lnomega'+sufix[instrument]['lnomega']]['object'].value,\
parameters['lnS'+sufix[instrument]['lnS']]['object'].value)
elif options['photometry'][instrument]['PHOT_NOISE_MODEL'] == 'GPAsteroseismology':
log_like = log_like + get_asteroseismology_likelihood(xt[all_tr_instruments_idxs[k]],residuals*1e6,yerrt[all_tr_instruments_idxs[k]]*1e6,\
parameters['sigma_w'+sufix[instrument]['sigma_w']]['object'].value,\
parameters['lnomega'+sufix[instrument]['lnomega']]['object'].value,\
parameters['lnS'+sufix[instrument]['lnS']]['object'].value,\
parameters['lnQ'+sufix[instrument]['lnQ']]['object'].value,\
parameters['lnA'+sufix[instrument]['lnA']]['object'].value,\
parameters['epsilon'+sufix[instrument]['epsilon']]['object'].value,\
parameters['lnW'+sufix[instrument]['lnW']]['object'].value,\
parameters['lnnu'+sufix[instrument]['lnnu']]['object'].value,\
parameters['lnDeltanu'+sufix[instrument]['lnDeltanu']]['object'].value,\
instrument)
else:
taus = 1.0/((yerrt[all_tr_instruments_idxs[k]]*1e6)**2 + (parameters['sigma_w'+sufix[instrument]['sigma_w']]['object'].value)**2)
log_like = log_like - 0.5*(n_data_trs[k]*log2pi+np.sum(np.log(1./taus)+taus*((residuals*1e6)**2)))
print '\t Log-likelihood for transit fit(s):',log_like
# Plot RVs:
if options['MODE'] != 'transit':
radvel_params = init_radvel()
if options['MODE'] == 'full':
plt.subplot2grid((nrows,ncols),(1,0),colspan=ncols)
elif options['MODE'] == 'rvs':
plt.subplot2grid((nrows,ncols),(0,0),colspan=ncols)
if len(all_rv_instruments) == 1:
radvel_params['per1'] = radvel.Parameter(value=parameters['P']['object'].value)
radvel_params['tc1'] = radvel.Parameter(value=parameters['t0']['object'].value)
radvel_params['w1'] = radvel.Parameter(value=parameters['omega']['object'].value*np.pi/180.)
radvel_params['e1'] = radvel.Parameter(value=parameters['ecc']['object'].value)
radvel_params['k1'] = radvel.Parameter(value=parameters['K']['object'].value)
model = parameters['mu']['object'].value + radvel.model.RVModel(radvel_params).__call__(xrv)
residuals = (yrv-model)
model_t = parameters['t0']['object'].value + np.linspace(-0.5,0.5,500)*parameters['P']['object'].value
model_pred = parameters['mu']['object'].value + radvel.model.RVModel(radvel_params).__call__(model_t)
phase = get_phases(xrv,parameters['P']['object'].value,parameters['t0']['object'].value)
plt.errorbar(phase,(yrv-parameters['mu']['object'].value),yerr=rv_err,fmt='o',label=all_rv_instruments[0])
model_phase = get_phases(model_t,parameters['P']['object'].value,parameters['t0']['object'].value)
idx_rv_model = np.argsort(model_phase)
plt.plot(model_phase[idx_rv_model],model_pred[idx_rv_model]-parameters['mu']['object'].value)
plt.ylabel('Radial velocity')
plt.xlabel('Phase')
if options['MODE'] == 'full':
plt.subplot2grid((4,len(all_tr_instruments)),(2,0),colspan=len(all_tr_instruments))
elif options['MODE'] == 'rvs':
plt.subplot2grid((3,len(all_tr_instruments)),(1,0),colspan=len(all_tr_instruments))
tzero = int(xrv[0])
plt.errorbar(xrv-tzero,(yrv-parameters['mu']['object'].value),rv_err,fmt='o')
ttmodel = np.linspace(np.min(xrv),np.max(xrv),1000)
mmodel = radvel.model.RVModel(radvel_params).__call__(ttmodel)
plt.plot(ttmodel-tzero,mmodel)
plt.ylabel('Radial velocity')
plt.xlabel('Time - '+str(tzero))
if options['MODE'] == 'full':
plt.subplot2grid((4,len(all_tr_instruments)),(3,0),colspan=len(all_tr_instruments))
elif options['MODE'] == 'rvs':
plt.subplot2grid((3,len(all_tr_instruments)),(2,0),colspan=len(all_tr_instruments))
plt.errorbar(phase,residuals,rv_err,fmt='o')
plt.ylabel('RV Residuals')
plt.xlabel('Phase')
# Save phased model, data and residuals for the RVs:
fout_model = open(out_dir+'rv_model.dat','w')
for i in range(len(model_t)):
fout_model.write('{0:.10f} {1:.10f}\n'.format(model_phase[i],(model_pred-parameters['mu']['object'].value)[i]))
fout_model.close()
fout_data = open(out_dir+'rv_data.dat','w')
for i in range(len(phase)):
fout_data.write('{0:.10f} {1:.10f} {2:.10f}\n'.format(phase[i],((yrv-parameters['mu']['object'].value))[i],rv_err[i]))
fout_data.close()
fout_res = open(out_dir+'rv_residuals.dat','w')
for i in range(len(phase)):
fout_res.write('{0:.10f} {1:.10f} {2:.10f}\n'.format(phase[i],residuals[i],rv_err[i]))
fout_res.close()
# Get RV log-likelihood:
taus = 1.0/((rv_err)**2 + (parameters['sigma_w_rv']['object'].value)**2)
log_like = -0.5*(n_data_rvs[0]*log2pi+np.sum(np.log(1./taus)+taus*(residuals**2)))
print '\t Log-likelihood radial-velocity:',log_like
else:
log_like = 0.0
all_residuals = []
all_phases = []
radvel_params['per1'] = radvel.Parameter(value=parameters['P']['object'].value)
radvel_params['tc1'] = radvel.Parameter(value=parameters['t0']['object'].value)
radvel_params['w1'] = radvel.Parameter(value=parameters['omega']['object'].value*np.pi/180.)
radvel_params['e1'] = radvel.Parameter(value=parameters['ecc']['object'].value)
radvel_params['k1'] = radvel.Parameter(value=parameters['K']['object'].value)
model_t = parameters['t0']['object'].value + np.linspace(-0.5,0.5,500)*parameters['P']['object'].value
model_pred = radvel.model.RVModel(radvel_params).__call__(model_t)
for i in range(len(all_rv_instruments)):
model = parameters['mu_'+all_rv_instruments[i]]['object'].value + \
radvel.model.RVModel(radvel_params).__call__(xrv[all_rv_instruments_idxs[i]])
residuals = (yrv[all_rv_instruments_idxs[i]]-model)
all_residuals.append(residuals)
phase = get_phases(xrv[all_rv_instruments_idxs[i]],parameters['P']['object'].value,parameters['t0']['object'].value)
all_phases.append(phase)
plt.errorbar(phase,(yrv[all_rv_instruments_idxs[i]]-parameters['mu_'+all_rv_instruments[i]]['object'].value),\
yerr=rv_err[all_rv_instruments_idxs[i]],label=all_rv_instruments[i],fmt='o')
# Save data and residuals:
fout_data = open(out_dir+'rv_data_'+all_rv_instruments[i]+'.dat','w')
for ii in range(len(phase)):
fout_data.write('{0:.10f} {1:.10f} {2:.10f}\n'.format(phase[ii],(yrv[all_rv_instruments_idxs[i]]-\
parameters['mu_'+all_rv_instruments[i]]['object'].value)[ii],rv_err[all_rv_instruments_idxs[i]][ii]))
fout_data.close()
fout_res = open(out_dir+'rv_residuals_'+all_rv_instruments[i]+'.dat','w')
for ii in range(len(phase)):
fout_res.write('{0:.10f} {1:.10f} {2:.10f}\n'.format(phase[ii],residuals[ii],rv_err[all_rv_instruments_idxs[i]][ii]))
taus = 1.0/((rv_err[all_rv_instruments_idxs[i]])**2 + (parameters['sigma_w_rv'+sufix[all_rv_instruments[i]]['sigma_w_rv']]['object'].value)**2)
log_like = log_like -0.5*(n_data_rvs[i]*log2pi+np.sum(np.log(1./taus)+taus*(residuals**2)))
print '\t Log-likelihood radial velocities:',log_like
fout_res.close()
model_phase = get_phases(model_t,parameters['P']['object'].value,parameters['t0']['object'].value)
idx_rvs_sorted = np.argsort(model_phase)
plt.plot(model_phase[idx_rvs_sorted],model_pred[idx_rvs_sorted],'-',color='red')
# Save model:
fout_model = open(out_dir+'rv_model.dat','w')
for i in idx_rvs_sorted:
fout_model.write('{0:.10f} {1:.10f}\n'.format(model_phase[i],model_pred[i]))
fout_model.close()
plt.legend()
plt.ylabel('Radial velocity')
plt.xlabel('Phase')
plt.subplot2grid((4,len(all_tr_instruments)),(2,0),colspan=len(all_tr_instruments))
xrv_min = np.inf
xrv_max = -np.inf
for i in range(len(all_rv_instruments)):
if np.min(xrv[all_rv_instruments_idxs[i]])<xrv_min:
xrv_min = np.min(xrv[all_rv_instruments_idxs[i]])
if np.max(xrv[all_rv_instruments_idxs[i]])>xrv_max:
xrv_max = np.max(xrv[all_rv_instruments_idxs[i]])
tzero = int(xrv_min)
for i in range(len(all_rv_instruments)):
plt.errorbar(xrv[all_rv_instruments_idxs[i]]-tzero,(yrv[all_rv_instruments_idxs[i]]-parameters['mu_'+all_rv_instruments[i]]['object'].value),\
yerr=rv_err[all_rv_instruments_idxs[i]],label=all_rv_instruments[i],fmt='o')
ttmodel = np.linspace(xrv_min,xrv_max,1000)
mmodel = radvel.model.RVModel(radvel_params).__call__(ttmodel)
plt.plot(ttmodel-tzero,mmodel)
plt.ylabel('Radial velocity')
plt.xlabel('Time - '+str(tzero))
plt.subplot2grid((4,len(all_tr_instruments)),(3,0),colspan=len(all_tr_instruments))
for i in range(len(all_rv_instruments)):
plt.errorbar(all_phases[i],all_residuals[i],yerr=rv_err[all_rv_instruments_idxs[i]],fmt='o')
plt.ylabel('RV Residuals')
plt.xlabel('Phase')
if options['PLOT'].lower() != 'no' and options['PLOT'].lower() != 'false' and options['PLOT'].lower() != 'save':
plt.show()
elif options['PLOT'].lower() == 'save':
plt.savefig(out_dir+'fig.png',dpi=300)
else:
plt.clf()
| mit |
rabrahm/ceres | pucheros/pucherosutils.py | 1 | 10415 | import sys
import matplotlib
matplotlib.use("Agg")
base = '../'
sys.path.append(base+"utils/GLOBALutils")
import GLOBALutils
import numpy as np
import scipy
from astropy.io import fits as pyfits
import os
import glob
import tempfile
import StringIO
import pycurl
from pylab import *
def is_there(string, word):
l=len(word)
i=0
ist = False
	# '<=' so that a match ending exactly at the end of the string is also found:
	while i <= len(string)-l:
if string[i:i+l] == word:
ist = True
i+=1
return ist
def search_name(obj):
name = obj.split('/')[-1]
	try:
		name = name.split('_')[1]
	except:
		# no '_' in the filename; fall back to the name without its extension
		name = name.split('.')[0]
#print 'NAME:', name
return name
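# FileClassify: scan a raw-data directory for 1x1-binned *.fit frames (skipping any listed
# in bad_files.txt), classify them into biases, flats, image/fibre flats, science frames,
# ThAr frames and darks, and write a night log sorted by observation date. A typical call
# could look like the following (the path and log name here are only illustrative):
#
#     biases, flats, img_flats, fib_flats, objects, thars, darks = \
#         FileClassify('/data/pucheros/20150801/', '/data/pucheros/20150801/night.log')
#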
def FileClassify(path,log):
biases = []
flats = []
img_flats = []
fib_flats = []
objects = []
darks = []
thars = []
lines = []
dates = []
archs = glob.glob(path+'*.fit')
bad_files = []
if os.access(path+'bad_files.txt',os.F_OK):
bf = open(path+'bad_files.txt')
linesbf = bf.readlines()
for line in linesbf:
bad_files.append(path+line[:-1])
bf.close()
for arch in archs:
dump = False
for bf in bad_files:
if arch == bf:
dump = True
break
if not dump:
h = pyfits.open(arch)
#print h[0].header['XBINNING'], h[0].header['YBINNING'], arch
if h[0].header['XBINNING'] == 1 and h[0].header['YBINNING'] == 1:
if h[0].header['IMAGETYP'] == 'Light Frame' or h[0].header['IMAGETYP'] == 'LIGHT':
if 'flat' in arch:
flats.append(arch)
else:
name = h[0].header['OBJECT']
expt = h[0].header['EXPTIME']
date = h[0].header['DATE-OBS']
line = "%-15s %8.2f %8s %s\n" % (name, expt, date, arch)
ye = float(date[:4])
mo = float(date[5:7])
da = float(date[8:10])
ho = float(date[11:13])-4.0
						mi = float(date[14:16])
se = float(date[17:])
lines.append(line)
dates.append( jd( ye,mo,da,ho,mi,se ) )
#f.write(line)
if is_there(arch.lower(),'thar') or is_there(arch.lower(),'th_ar'):
thars.append(arch)
else:
objects.append(arch)
elif h[0].header['IMAGETYP'] == 'Bias Frame' or h[0].header['IMAGETYP'] == 'BIAS':
biases.append(arch)
elif (h[0].header['IMAGETYP'] == 'Flat Frame' or h[0].header['IMAGETYP'] == 'FLAT') and arch != 'MasterFlat.fits':
# Now check which kind of flat it is.
# Maybe a "surface" flat...
if(is_there(arch.lower(),'imgflat')):
img_flats.append(arch)
# ...a fibre flat...
elif(is_there(arch.lower(),'fibre')):
fib_flats.append(arch)
# (use them for traces, blaze and col-to-col)
flats.append(arch)
# ...else, it is a screen flat (w/difussor):
else:
flats.append(arch)
elif h[0].header['IMAGETYP'] == 'Dark Frame' or h[0].header['IMAGETYP'] == 'DARK':
if h[0].header['EXPTIME']!=0.0:
darks.append(arch)
h.close()
lines = np.array(lines)
dates = np.array(dates)
I = np.argsort(dates)
lines = lines[I]
f = open(log,'w')
for line in lines:
f.write(line)
f.close()
return biases,flats,img_flats,fib_flats,objects,thars,darks
def get_rg():
return 9.6,1.6
def MedianCombine(ImgList,zero_bo,zero,dark_bo=False, dlist = []):
"""
Median combine a list of images
"""
hf = pyfits.getheader(ImgList[0])
if zero_bo:
Master = pyfits.getdata(zero)
if dark_bo:
Dark = get_dark(dlist,hf['EXPTIME'])
n = len(ImgList)
if n==0:
raise ValueError("empty list provided!")
d = pyfits.getdata(ImgList[0])
if zero_bo:
d = d - Master
if dark_bo:
d = d - Dark
factor = 1.25
if (n < 3):
factor = 1
#ronoise = factor * h.header['ENOISE'] / np.sqrt(n)
#gain = h.header['EGAIN']
ronoise,gain=get_rg()
if (n == 1):
return d, ronoise, gain
else:
for i in range(n-1):
h = pyfits.getdata(ImgList[i+1])
if zero_bo:
h = h-Master
if dark_bo:
h = h-Dark
d = np.dstack((d,h))
return np.median(d,axis=2), ronoise/np.sqrt(n), gain
def get_dark(darks,t):
exact = 0
dts = []
for dark in darks:
hd = pyfits.getheader(dark)
dt = hd['EXPTIME']
dts.append(dt)
if dt == t:
#print 'dark:',dark
DARK = pyfits.getdata(dark)
exact = 1
dts = np.array(dts)
if exact == 0:
if t < dts.min():
I = np.where( dts == dts.min() )[0]
DARK = pyfits.getdata(darks[I[0]])*t/dts[I[0]]
elif t > dts.max():
I = np.where( dts == dts.max() )[0]
DARK = pyfits.getdata(darks[I[0]])*t/dts[I[0]]
#print darks[I[0]]
else:
tmin = dts.min()
tmax = dts.max()
I = np.where( dts == dts.min() )[0]
Dmin = pyfits.getdata(darks[I[0]])
Dminname=darks[I[0]]
I = np.where( dts == dts.max() )[0]
Dmax = pyfits.getdata(darks[I[0]])
Dmaxname = darks[I[0]]
i = 0
while i < len(dts):
if dts[i] < t and dts[i] > tmin:
tmin = dts[i]
Dminname = darks[i]
Dmin = pyfits.getdata(darks[i])
elif dts[i] > t and dts[i] < tmax:
tmax = dts[i]
Dmaxname = darks[i]
Dmax = pyfits.getdata(darks[i])
i+=1
num = Dmax - Dmin
den = tmax-tmin
m = num/den
n = Dmax - m*tmax
DARK = m*t+n
return DARK
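# Summary of get_dark above: it returns a dark frame matched to exposure time t.
# An exact-exposure dark is used when available; if t lies outside the range of
# available darks the closest one is scaled by t/t_dark; otherwise the two
# bracketing darks are interpolated pixel-by-pixel, DARK(t) = m*t + n with
# m = (D_max - D_min)/(t_max - t_min) and n = D_max - m*t_max.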
def jd(y,m,d,h,mins,s):
"Julian day is calculated here if it's needed"
MY = (m-14)/12
y = MY+y
return ( 1461 * ( y + 4800 ) ) / 4 + ( 367 * ( m - 2 - 12*MY ) ) / 12 - ( 3 * ( ( y + 4900 ) / 100 ) ) / 4 + d -32077.5 + htosec(h,mins,s)/86400.0
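# Quick sanity check of the formula above (it relies on Python 2 floor division
# for the (m-14)/12 term): jd(2000, 1, 1, 12, 0, 0) evaluates to 2451545.0,
# the Julian Date of the J2000.0 epoch.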
def htosec(h,m,s):
"transform from hour,minute and seconds, to seconds"
return s+60.0*(m+60.0*h)
def fit_blaze(w,f,n=5):
warnings.simplefilter('ignore', np.RankWarning)
li = len(w)
co = np.polyfit(w,f,n)
res = f - np.polyval(co,w)
dev = np.sqrt(np.var(res))
J1 = np.where(res < -1.5*dev)[0]
J2 = np.where(res > 3*dev)[0]
J = np.hstack((J1,J2))
J = np.sort(J)
I = np.where( (res >= -1.5*dev) & (res <= 3*dev) )[0]
cond = True
if len(J)==0 or len(I) < .3*li:
cond = False
while cond:
w,f = w[I],f[I]
co = np.polyfit(w,f,n)
res = f - np.polyval(co,w)
dev = np.sqrt(np.var(res))
J1 = np.where(res < -1.5*dev)[0]
J2 = np.where(res > 3*dev)[0]
J = np.hstack((J1,J2))
J = np.sort(J)
I = np.where( (res >= -1.5*dev) & (res <= 3*dev) )[0]
cond = True
if len(J)==0 or len(I) < .3*li:
cond = False
return co
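# Typical use of fit_blaze (illustrative only, the variable names are made up):
#   coefs = fit_blaze(wav_order, flux_order, n=5)
#   blaze = np.polyval(coefs, wav_order)
# The loop above iteratively rejects points more than 1.5 sigma below or 3 sigma
# above the current fit (the asymmetric clipping keeps absorption lines out of
# the continuum estimate) and stops when nothing is rejected or when fewer than
# 30% of the original points would remain.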
def mjd_fromheader(h):
"""
return modified Julian date from header
"""
datetu = h[0].header['DATE-OBS']
mjd0,mjd,i = GLOBALutils.iau_cal2jd(int(datetu[:4]),int(datetu[5:7]),int(datetu[8:10]))
ut = float(datetu[11:13]) + float(datetu[14:16])/60. + float(datetu[17:])/3600.
mjd_start = mjd + ut/24.0
secinday = 24*3600.0
fraction = 0.5
texp = h[0].header['EXPTIME'] #sec
mjd = mjd_start + (fraction * texp) / secinday
return mjd, mjd0
def get_coords(obname,mjd):
if obname.lower() == 'alphacent':
obname = 'alpha cent'
elif obname.lower() == 'alphaboo':
obname = 'alpha boo'
elif obname.lower() == 'hadar' or obname.lower() == 'betacen':
obname = 'beta cen'
elif obname.lower() == 'diphda':
obname = 'bet cet'
elif obname.lower() == 'betacar':
obname = 'beta car'
elif obname.lower() == 'betscl':
obname = 'bet scl'
elif obname.lower() == 'bvel':
obname = 'b vel'
elif obname.lower() == 'deltasco':
obname = 'del sco'
elif obname.lower() == 'delcen':
obname = 'del cen'
elif obname.lower() == 'epsilonaqr':
obname = 'eps aqr'
elif obname.lower() == 'epspsa':
obname = 'eps psa'
elif obname.lower() == 'etahya' or obname.lower() == 'ethahydra':
obname = 'eta Hya'
elif obname.lower() == 'etapsa':
obname = 'eta psa'
elif obname.lower() == 'etacen':
obname = 'eta cen'
elif obname.lower() == 'opup':
obname = 'o Pup'
elif obname.lower() == 'etacar':
obname = 'eta Car'
elif obname.lower() == 'agcar':
obname = 'ag Car'
elif obname.lower() == 'hrcar':
obname = 'hr Car'
elif obname.lower() == 'sslep':
obname = 'ss lep'
elif obname.lower() == 'thetavir':
obname = 'theta vir'
elif obname.lower() == 'mucen':
obname = 'mu cen'
elif obname.lower() == 'lesath':
obname = 'ups sco'
elif obname.lower() == 'mulup':
obname = 'mu lup'
elif obname.lower() == 'chioph':
obname = 'chi oph'
elif obname.lower() == 'dlup':
obname = 'd lup'
elif obname.lower() == '48lib':
obname = '48 lib'
elif obname.lower() == 'iotara':
obname = 'iot ara'
elif obname.lower() == 'qvtel':
obname = 'qv tel'
elif obname.lower() == 'taucet':
obname = 'tau cet'
elif obname.lower() == 'pi2ori':
obname = 'pi2 ori'
elif obname.lower() == 'zetapeg':
obname = 'zet peg'
elif obname.lower() == 'tpyx':
obname = 't pyx'
elif obname.lower() == 'omicronpup':
obname = 'omi pup'
sp,ra,dec = 0,0,0
(th,tfile) = tempfile.mkstemp(prefix='CP', text=True)
tf = open(tfile,'w')
tf.write("output console=off\n")
tf.write("output script=off\n")
tf.write("output error=merge\n")
tf.write("set limit 1\n")
tf.write("format object fmt1 \"%IDLIST(1) | %OTYPELIST(S) | %SP(S) | %COO(A) | %COO(D) | %PM(A) | %PM(D)\"\n")
tf.write("result full\n")
tf.write("query id %s\n" % ( obname ) )
tf.close()
values = [("scriptFIle", (pycurl.FORM_FILE, tfile))]
output = StringIO.StringIO()
c = pycurl.Curl()
c.setopt(pycurl.URL, "http://simbad.harvard.edu/simbad/sim-script")
c.setopt(c.HTTPPOST, values)
c.setopt(pycurl.WRITEFUNCTION, output.write)
cond = True
while cond:
try:
c.perform()
except:
print 'Trying again to perform query to SIMBAD'
else:
cond = False
c.close()
result = output.getvalue()
lines = result.split('\n')
info = lines[6].split('|')
	if 'Unrecognized' in info[0] or 'not' in info[0]:
know = False
else:
know = True
sp,ra,dec,pmra,pmdec = info[2],info[3],info[4],info[5],info[6]
if '~' in pmra:
pmra = '0.'
if '~' in pmdec:
pmdec = '0.'
rad = ra.split()
decd = dec.split()
ra = float(rad[0])*360./24. + float(rad[1])*6./24. + float(rad[2])/240. + (float(pmra)/(3600*1000.))*((mjd-51544.5)/365.)
if float(decd[0])<0:
dec = -(np.absolute(float(decd[0])) + float(decd[1])/60. + float(decd[2])/3600.) + (float(pmdec)/(3600*1000.))*((mjd-51544.5)/365.)
else:
dec = float(decd[0]) + float(decd[1])/60. + float(decd[2])/3600. + (float(pmdec)/(3600*1000.))*((mjd-51544.5)/365.)
return sp,ra,dec,know
| mit |
dualphase90/Learning-Neural-Networks | Neural Nets Simple example.py | 1 | 4881 | # Neural Networks Demystified
# Part 2: Forward Propagation
#
# Supporting code for short YouTube series on artificial neural networks.
#
# Stephen Welch
# @stephencwelch
from scipy import optimize
## ----------------------- Part 1 ---------------------------- ##
import numpy as np
# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([3, 5], [5, 1], [10, 2]), dtype=float)
y = np.array(([75], [82], [93]), dtype=float)
# Normalize
X = X / np.amax(X, axis=0)
y = y / 100 # Max test score is 100
## ----------------------- Part 2 ---------------------------- ##
class Neural_Network(object):
def __init__(self):
# Define Hyper parameters
self.inputLayerSize = 2
self.outputLayerSize = 1
self.hiddenLayerSize = 3
# Weights (parameters)
self.W1 = np.random.randn(self.inputLayerSize, self.hiddenLayerSize)
self.W2 = np.random.randn(self.hiddenLayerSize, self.outputLayerSize)
def forward(self, X):
# Propagate inputs though network
self.z2 = np.dot(X, self.W1)
self.a2 = self.sigmoid(self.z2)
self.z3 = np.dot(self.a2, self.W2)
yHat = self.sigmoid(self.z3)
return yHat
def sigmoid(self, z):
# Apply sigmoid activation function to scalar, vector, or matrix
return 1 / (1 + np.exp(-z))
def costFunction(self,X,y):
self.yHat=self.forward(X)
J=0.5* sum((y-self.yHat)**2)
return J
def sigmoidPrime(self,z):
#Gradient of sigmoid
return np.exp(-z)/((1+np.exp(-z))**2)
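    # Note on sigmoidPrime above: for s(z) = 1/(1 + exp(-z)) the derivative is
    #   s'(z) = s(z) * (1 - s(z)) = exp(-z) / (1 + exp(-z))**2,
    # which is exactly the expression returned.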
def costFunctionPrime(self, X, y):
# Compute derivative with respect to W and W2 for a given X and y:
self.yHat = self.forward(X)
delta3 = np.multiply(-(y - self.yHat), self.sigmoidPrime(self.z3))
dJdW2 = np.dot(self.a2.T, delta3)
delta2 = np.dot(delta3, self.W2.T) * self.sigmoidPrime(self.z2)
dJdW1 = np.dot(X.T, delta2)
return dJdW1, dJdW2
def getParams(self):
# Get W1 and W2 unrolled into vector:
params = np.concatenate((self.W1.ravel(), self.W2.ravel()))
return params
def setParams(self, params):
        # Set W1 and W2 using a single parameter vector.
W1_start = 0
W1_end = self.hiddenLayerSize * self.inputLayerSize
self.W1 = np.reshape(params[W1_start:W1_end], (self.inputLayerSize, self.hiddenLayerSize))
W2_end = W1_end + self.hiddenLayerSize * self.outputLayerSize
self.W2 = np.reshape(params[W1_end:W2_end], (self.hiddenLayerSize, self.outputLayerSize))
def computeGradients(self, X, y):
dJdW1, dJdW2 = self.costFunctionPrime(X, y)
return np.concatenate((dJdW1.ravel(), dJdW2.ravel()))
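# Numerical gradient check (a sketch, not part of the original script): perturb
# each weight by +/- epsilon and compare the central-difference slope of the
# cost with the analytic gradient from computeGradients. If the backpropagation
# above is correct, the two vectors agree to roughly 1e-8 in relative norm.
def computeNumericalGradient(N, X, y, epsilon=1e-4):
    paramsInitial = N.getParams()
    numgrad = np.zeros(paramsInitial.shape)
    perturb = np.zeros(paramsInitial.shape)
    for p in range(len(paramsInitial)):
        # Perturb one parameter at a time and measure the change in cost
        perturb[p] = epsilon
        N.setParams(paramsInitial + perturb)
        loss2 = N.costFunction(X, y)
        N.setParams(paramsInitial - perturb)
        loss1 = N.costFunction(X, y)
        numgrad[p] = (loss2 - loss1) / (2.0 * epsilon)
        perturb[p] = 0.0
    # Restore the original weights before returning
    N.setParams(paramsInitial)
    return numgrad
# Example usage (commented out so the behaviour of the script is unchanged):
#   numgrad = computeNumericalGradient(NN, X, y)
#   grad = NN.computeGradients(X, y)
#   print np.linalg.norm(grad - numgrad) / np.linalg.norm(grad + numgrad)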
NN=Neural_Network()
import numpy as np
X=np.array(([3,5],[5,1],[10,2]),dtype=float)
y=np.array(([75],[82],[93]),dtype=float)
# Normalize
X = X/np.amax(X, axis=0)
y = y/100 #Max test score is 100
#print NN.forward(X)
#print NN.costFunction(X,y)
djdW1,djdW2= NN.costFunctionPrime(X,y)
cost1= NN.costFunction(X,y)
#print djdW1
#print djdW2
scalar=100
NN.W1=NN.W1-scalar*djdW1
NN.W2=NN.W2-scalar*djdW2
cost2= NN.costFunction(X,y)
scalar=100*2
NN.W1=NN.W1+scalar*djdW1
NN.W2=NN.W2+scalar*djdW2
cost3= NN.costFunction(X,y)
print " original "+str(cost1)
print " Added "+str(cost3)
print " Subtracted "+str(cost2)
class trainer(object):
def __init__(self, N):
# Make Local reference to network:
self.N = N
def callbackF(self, params):
self.N.setParams(params)
self.J.append(self.N.costFunction(self.X, self.y))
def costFunctionWrapper(self, params, X, y):
self.N.setParams(params)
cost = self.N.costFunction(X, y)
grad = self.N.computeGradients(X, y)
return cost, grad
def train(self, X, y):
# Make an internal variable for the callback function:
self.X = X
self.y = y
# Make empty list to store costs:
self.J = []
params0 = self.N.getParams()
options = {'maxiter': 2000, 'disp': True}
_res = optimize.minimize(self.costFunctionWrapper, params0, jac=True, method='BFGS', \
args=(X, y), options=options, callback=self.callbackF)
self.N.setParams(_res.x)
self.optimizationResults = _res
NN=Neural_Network()
T=trainer(NN)
T.train(X,y)
print T.J  # cost history recorded by callbackF at each BFGS iteration
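# To inspect the optimisation one could plot the recorded cost history
# (illustrative sketch, assumes matplotlib is installed):
#   import matplotlib.pyplot as plt
#   plt.plot(T.J)
#   plt.xlabel('Iterations')
#   plt.ylabel('Cost')
#   plt.show()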
# import time
#
# weights=np.linspace(-5,5,1000)
# costs=np.zeros(1000)
#
#
# startTime=time.clock()
# for i in range(1000):
# NN.W1[0,0]=weights[i]
# yHat=NN.forward(X)
# costs[i] = 0.5*sum((y-yHat)**2)
# endTime=time.clock()
#
#
# timeElapsed = endTime-startTime
# # print timeElapsed
# #
# # import matplotlib.pyplot as plt
# # plt.interactive(False)
# #
# # plt.plot(weights, costs)
# # plt.show() | mit |
ryfeus/lambda-packs | Pandas_numpy/source/pandas/core/frame.py | 1 | 238227 | """
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_datetimetz,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
_ensure_float,
_ensure_float64,
_ensure_int64,
_ensure_platform_int,
is_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.common import (_try_sort,
_default_index,
_values_from_object,
_maybe_box_datetimelike,
_dict_compat,
standardize_mapping)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_ensure_index_from_sequences)
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import pandas.core.algorithms as algorithms
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.compat import PY36
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (Appender, Substitution,
rewrite_axis_style_signature)
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core import accessor
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.io.formats.format as fmt
import pandas.io.formats.console as console
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
from pandas._libs import lib, algos as libalgos
from pandas.core.config import get_option
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""",
versionadded_to_excel='',
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys
on : label or list
Field names to join on. Must be found in both DataFrames. If on is
None and not merging on indexes, then it merges on the intersection of
the columns by default.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword)
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
indicator : boolean or string, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
.. versionadded:: 0.17.0
validate : string, default None
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> A.merge(B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
    The output type will be the same as 'left', if it is a subclass
of DataFrame.
See also
--------
merge_ordered
merge_asof
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
index : Index or array-like
Index to use for resulting frame. Will default to np.arange(n) if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2
a b c d e
0 2 8 8 3 4
1 4 2 9 0 9
2 1 0 7 8 0
3 5 1 7 1 3
4 6 0 2 4 2
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['sortlevel', 'get_value', 'set_value', 'from_csv'])
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = dict((k, data[k]) for k in data_columns)
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None) is not None:
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (list, types.GeneratorType)):
if isinstance(data, types.GeneratorType):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = _default_index(len(data[0]))
else:
index = _default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
elif isinstance(data, collections.Iterator):
raise TypeError("data argument can't be an iterator")
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: %s' % e)
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array((len(index), len(columns)),
data, dtype=dtype)
mgr = self._init_ndarray(values, index, columns,
dtype=values.dtype, copy=False)
else:
raise ValueError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
columns = _ensure_index(columns)
# GH10856
# raise ValueError if only scalars in dict
if index is None:
extract_index(list(data.values()))
# prefilter if columns passed
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
if index is None:
index = extract_index(list(data.values()))
else:
index = _ensure_index(index)
arrays = []
data_names = []
for k in columns:
if k not in data:
# no obvious "empty" int column
if dtype is not None and issubclass(dtype.type,
np.integer):
continue
if dtype is None:
# 1783
v = np.empty(len(index), dtype=object)
elif np.issubdtype(dtype, np.flexible):
v = np.empty(len(index), dtype=object)
else:
v = np.empty(len(index), dtype=dtype)
v.fill(np.nan)
else:
v = data[k]
data_names.append(k)
arrays.append(v)
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
keys = _try_sort(keys)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = _default_index(N)
else:
index = _ensure_index(index)
if columns is None:
columns = _default_index(K)
else:
columns = _ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif is_datetimetz(values):
return self._init_dict({0: values}, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '%s' (Exception was: %s)"
% (dtype, orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list with the row axis labels and column axis labels as the
only members. They are returned in that order.
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
"""
return len(self.index), len(self.columns)
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns. In case of a non-interactive session, no
        boundaries apply.
        ignore_width is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not com.in_interactive_session():
return True
if (get_option('display.width') is not None or
com.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
        # check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if not (max_rows is None): # unlimited rows
# min of two, where one may be None
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
            val = buf.getvalue().replace('<', r'&lt;', 1)
            val = val.replace('>', r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
@property
def style(self):
"""
Property returning a Styler object containing methods for
        building a styled HTML representation of the DataFrame.
See Also
--------
pandas.io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
itertuples : Iterate over DataFrame rows as namedtuples of the values.
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples, with index value as first
element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
name : string, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See also
--------
iterrows : Iterate over DataFrame rows as (index, Series) pairs.
iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]},
index=['a', 'b'])
>>> df
col1 col2
a 1 0.1
b 2 0.2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
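    # Illustrative use of dot (not taken from the docstring above): with
    #   a = pd.DataFrame(np.ones((2, 3)), columns=['x', 'y', 'z'])
    #   b = pd.DataFrame(np.ones((3, 4)), index=['x', 'y', 'z'])
    # a.dot(b) aligns a's columns with b's index and returns a 2x4 DataFrame;
    # passing a Series instead returns a Series indexed like a's rows.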
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
"""
Construct DataFrame from dict of array-like or dicts
Parameters
----------
data : dict
{field : array-like} or {field : dict}
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
DataFrame
"""
index, columns = None, None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient != 'columns': # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
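    # Illustrative use of from_dict (not part of the original docstring):
    #   DataFrame.from_dict({'col1': [1, 2], 'col2': [3, 4]})
    # builds a 2x2 frame with 'col1' and 'col2' as columns, while
    #   DataFrame.from_dict({'row1': {'a': 1, 'b': 2}}, orient='index')
    # treats the outer keys as row labels instead.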
def to_dict(self, orient='dict', into=dict):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
.. versionadded:: 0.17.0
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
result : collections.Mapping like {column -> {index -> value}}
Examples
--------
>>> df = pd.DataFrame(
{'col1': [1, 2], 'col2': [0.5, 0.75]}, index=['a', 'b'])
>>> df
           col1  col2
        a     1  0.50
        b     2  0.75
>>> df.to_dict()
{'col1': {'a': 1, 'b': 2}, 'col2': {'a': 0.5, 'b': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': a 1
b 2
Name: col1, dtype: int64, 'col2': a 0.50
b 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]],
'index': ['a', 'b']}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'a': {'col1': 1.0, 'col2': 0.5}, 'b': {'col1': 2.0, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('a', 1), ('b', 2)])),
('col2', OrderedDict([('a', 0.5), ('b', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<type 'list'>, {'col2': 0.5, 'col1': 1.0}),
defaultdict(<type 'list'>, {'col2': 0.75, 'col1': 2.0})]
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning,
stacklevel=2)
# GH16122
into_c = standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', lib.map_infer(self.values.ravel(),
_maybe_box_datetimelike)
.reshape(self.values.shape).tolist())))
elif orient.lower().startswith('s'):
return into_c((k, _maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [into_c((k, _maybe_box_datetimelike(v))
for k, v in zip(self.columns, np.atleast_1d(row)))
for row in self.values]
elif orient.lower().startswith('i'):
return into_c((k, v.to_dict(into)) for k, v in self.iterrows())
else:
raise ValueError("orient '%s' not understood" % orient)
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail', private_key=None):
"""Write a DataFrame to a Google BigQuery table.
The main method a user calls to export pandas DataFrame contents to
Google BigQuery table.
Google BigQuery API Client Library v2 for Python is used.
Documentation is available `here
<https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__
Authentication to the Google BigQuery service is via OAuth 2.0.
- If "private_key" is not provided:
By default "application default credentials" are used.
If default application credentials are not found or are restrictive,
user account credentials are used. In this case, you will be asked to
grant permissions for product name 'pandas GBQ'.
- If "private_key" is provided:
Service account credentials will be used to authenticate.
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
Name of table to be written, in the form 'dataset.tablename'
project_id : str
Google BigQuery Account project ID.
chunksize : int (default 10000)
Number of rows to be inserted in each chunk from the dataframe.
verbose : boolean (default True)
Show percentage complete
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
'fail': If table exists, do nothing.
'replace': If table exists, drop it, recreate it, and insert data.
'append': If table exists, insert data. Create if does not exist.
private_key : str (optional)
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. jupyter iPython notebook on remote host)
"""
from pandas.io import gbq
return gbq.to_gbq(self, destination_table, project_id=project_id,
chunksize=chunksize, verbose=verbose, reauth=reauth,
if_exists=if_exists, private_key=private_key)
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = _ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = _ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = _ensure_index(arr_columns)
if columns is not None:
columns = _ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
index_data = [arrays[i] for i in to_remove]
result_index = _ensure_index_from_sequences(index_data,
names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
"""
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
"""
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = _ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(_ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = _ensure_index(keys)
arrays = values
return cls._from_arrays(arrays, columns, None)
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = _ensure_index(keys)
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
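    # Illustrative use of from_items (not part of the original docstring):
    #   DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
    # yields a 3x2 frame with columns 'A' and 'B' in that order; with
    # orient='index' the keys become row labels and `columns` must be given.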
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=None,
infer_datetime_format=False):
"""
Read CSV file (DEPRECATED, please use :func:`pandas.read_csv`
instead).
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
            write multi_index columns as a list of tuples (if True)
            or in the new, expanded format (if False)
infer_datetime_format: boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
"instead. Note that some of the default arguments are "
"different, so please refer to the documentation "
"for from_csv when changing your function calls",
FutureWarning, stacklevel=2)
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
"""
from pandas.core.sparse.frame import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sort_index(level=0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=None, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
sep : character, default ','
Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2', 'xz',
only used when the first argument is a filename
line_terminator : string, default ``'\n'``
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric
quotechar : string (length 1), default '\"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
.. deprecated:: 0.21.0
This argument will be removed and will always write each row
of the multi-index as a separate row in the CSV file.
Write MultiIndex columns as a list of tuples (if True) or in
the new, expanded format, where each MultiIndex column is a row
in the CSV (if False).
date_format : string, default None
Format string for datetime objects
decimal: string, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data
"""
if tupleize_cols is not None:
warnings.warn("The 'tupleize_cols' parameter is deprecated and "
"will be removed in a future version",
FutureWarning, stacklevel=2)
else:
tupleize_cols = False
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator, sep=sep,
encoding=encoding,
compression=compression, quoting=quoting,
na_rep=na_rep, float_format=float_format,
cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
@Appender(_shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
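# Hedged to_excel sketch (file and sheet names are hypothetical; requires an
# installed Excel engine such as openpyxl or xlsxwriter):
#
#   >>> df.to_excel('report.xlsx', sheet_name='Data', index=False)
#   >>> with pd.ExcelWriter('report.xlsx') as writer:   # several frames, one workbook
#   ...     df.to_excel(writer, sheet_name='Sheet1')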
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
"""
A class for writing Stata binary dta files from array-like objects
Parameters
----------
fname : str or buffer
String path or file-like object
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
Examples
--------
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
from pandas.io.stata import StataWriter
writer = StataWriter(fname, self, convert_dates=convert_dates,
encoding=encoding, byteorder=byteorder,
time_stamp=time_stamp, data_label=data_label,
write_index=write_index,
variable_labels=variable_labels)
writer.write_file()
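# Hedged to_stata sketch (path and column names are hypothetical):
#
#   >>> df = pd.DataFrame({'v': [1.0, 2.0],
#   ...                    'when': pd.to_datetime(['2017-01-01', '2017-06-01'])})
#   >>> df.to_stata('data.dta', convert_dates={'when': 'td'}, write_index=False)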
def to_feather(self, fname):
"""
write out the binary feather-format for DataFrames
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
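# Hedged to_feather sketch (requires the optional pyarrow/feather dependency;
# the file name is hypothetical). The feather format does not carry a custom
# index, so reset it first if the frame has one:
#
#   >>> df.reset_index(drop=True).to_feather('frame.feather')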
def to_parquet(self, fname, engine='auto', compression='snappy',
**kwargs):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
Parameters
----------
fname : str
string file path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
'io.parquet.engine' is used; if that option is also 'auto',
the first library found to be installed is used.
compression : str, optional, default 'snappy'
compression method, includes {'gzip', 'snappy', 'brotli'}
kwargs
Additional keyword arguments passed to the engine
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
compression=compression, **kwargs)
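# Hedged to_parquet sketch (needs pyarrow or fastparquet installed; the path
# is hypothetical):
#
#   >>> df.to_parquet('frame.parquet', engine='auto', compression='snappy')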
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
'column names')
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
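# Hedged to_string sketch: with buf=None the rendered table comes back as a
# plain string, which is handy for logging (the toy frame is hypothetical):
#
#   >>> text = pd.DataFrame({'a': [1, 2]}).to_string(index=False)
#   >>> print(text)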
@Substitution(header='whether to print column labels, default True')
@Appender(fmt.docstring_to_string, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None):
"""
Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
if (justify is not None and
justify not in fmt._VALID_JUSTIFY_PARAMETERS):
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal)
# TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
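# Hedged to_html sketch (the output path is hypothetical):
#
#   >>> html = df.to_html(classes='table table-striped', border=0)
#   >>> with open('table.html', 'w') as fh:
#   ...     fh.write(html)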
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Concise summary of a DataFrame.
Parameters
----------
verbose : {None, True, False}, optional
Whether to print the full summary.
None follows the `display.max_info_columns` setting.
True or False overrides the `display.max_info_columns` setting.
buf : writable buffer, defaults to sys.stdout
max_cols : int, default None
Determines whether full summary or short summary is printed.
None follows the `display.max_info_columns` setting.
memory_usage : boolean/string, default None
Specifies whether total memory usage of the DataFrame
elements (including index) should be displayed. None follows
the `display.memory_usage` setting. True or False overrides
the `display.memory_usage` setting. A value of 'deep' is equivalent
to True with deep introspection. Memory usage is shown in
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
- If None, then only show if the frame is smaller than
max_info_rows and max_info_columns.
- If True, always show counts.
- If False, never show counts.
"""
from pandas.io.formats.format import _put_lines
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index.summary())
if len(self.columns) == 0:
lines.append('Empty %s' % type(self).__name__)
_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max([len(pprint_thing(k)) for k in self.columns]) + 4
counts = None
tmpl = "%s%s"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError('Columns must equal counts (%d != %d)'
% (len(cols), len(counts)))
tmpl = "%s non-null %s"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl % (count, dtype))
def _non_verbose_repr():
lines.append(self.columns.summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f%s %s" % (num, size_qualifier, x)
num /= 1024.0
return "%3.1f%s %s" % (num, size_qualifier, 'PB')
if verbose:
_verbose_repr()
elif verbose is False:  # specifically set to False, not necessarily None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
lines.append('dtypes: %s' % ', '.join(dtypes))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: %s\n" %
_sizeof_fmt(mem_usage, size_qualifier))
_put_lines(buf, lines)
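# Hedged info sketch: verbose/null_counts control how much per-column detail
# is printed, and memory_usage='deep' inspects object columns for their real
# footprint (the toy frame is hypothetical):
#
#   >>> df = pd.DataFrame({'a': [1, None, 3], 'b': ['x', 'y', None]})
#   >>> df.info(verbose=True, null_counts=True, memory_usage='deep')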
def memory_usage(self, index=True, deep=False):
"""Memory usage of DataFrame columns.
Parameters
----------
index : bool
Specifies whether to include memory usage of DataFrame's
index in returned Series. If `index=True` (the default),
the first index of the Series is `Index`.
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
sizes : Series
A series with column names as index and memory usage of
columns with units of bytes.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
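# Hedged memory_usage sketch: the result is a Series of bytes per column, and
# deep=True interrogates object values for their actual size:
#
#   >>> df = pd.DataFrame({'a': range(3), 'b': ['x', 'yy', 'zzz']})
#   >>> df.memory_usage(index=True, deep=True).sum()   # total bytes, index included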
def transpose(self, *args, **kwargs):
"""Transpose index and columns"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = _unpickle_array(cols)
index = _unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
# old unpickling
(vals, idx, cols), object_state = state
index = _unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=_unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
return _maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except (TypeError, ValueError):
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
try:
if takeable is True:
series = self._iget_item_cache(col)
return series._set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
_set_value.__doc__ = set_value.__doc__
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1, convert=True)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (i.e. a not-found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._constructor_sliced.from_array(values,
index=self.index,
name=label,
fastpath=True)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = com._apply_if_callable(key, self)
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, Index, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
""" return the actual column """
# get column
if self.columns.is_unique:
return self._get_item_cache(key)
# duplicate columns & possible reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
return result
def _getitem_slice(self, key):
return self._slice(key, axis=0)
def _getitem_array(self, key):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take(indexer, axis=0, convert=False)
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
return self._take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == '':
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate an expression in the context of the calling DataFrame
instance.
Parameters
----------
expr : string
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ret : ndarray, scalar, or pandas object
See Also
--------
pandas.DataFrame.query
pandas.DataFrame.assign
pandas.eval
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.eval('a + b')
>>> df.eval('c = a + b')
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of a DataFrame including/excluding columns based on
their ``dtype``.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types use the numpy dtype ``numpy.number``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use np.datetime64, 'datetime' or 'datetime64'
* To select timedeltas, use np.timedelta64, 'timedelta' or
'timedelta64'
* To select Pandas categorical dtypes, use 'category'
* To select Pandas datetimetz dtypes, use 'datetimetz' (new in 0.20.0),
or a 'datetime64[ns, tz]' string
Examples
--------
>>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 0.3962 True 1.0
1 0.1459 False 2.0
2 0.2623 True 1.0
3 0.0764 False 2.0
4 -0.9703 True 1.0
5 -1.2094 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['floating'])
b
0 True
1 False
2 True
3 False
4 True
5 False
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on %s' %
(include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(is_dtype_instance_mapper,
self.dtypes.iteritems()):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
return self._constructor_sliced.from_array(values, index=self.index,
name=items, fastpath=True)
def __setitem__(self, key, value):
key = com._apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(key, DataFrame):
self._setitem_frame(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if key.values.size and not is_bool_dtype(key.values):
raise TypeError('Must pass DataFrame with boolean values only')
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except:
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must satisfy 0 <= loc <= len(columns)
column : string, number, or hashable object
label of the inserted column
value : int, Series, or array-like
allow_duplicates : bool, optional
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
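# Hedged insert sketch: loc is a positional index into the existing columns,
# so 0 places the new column first (names below are hypothetical):
#
#   >>> df = pd.DataFrame({'b': [1, 2], 'c': [3, 4]})
#   >>> df.insert(0, 'a', [10, 20])
#   >>> list(df.columns)
#   ['a', 'b', 'c']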
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
For python 3.6 and above, the columns are inserted in the order of
\*\*kwargs. For python 3.5 and earlier, since \*\*kwargs is unordered,
the columns are inserted in alphabetical order at the end of your
DataFrame. Assigning multiple columns within the same ``assign``
is possible, but you cannot reference other columns created within
the same ``assign`` call.
Examples
--------
>>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
"""
data = self.copy()
# do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com._apply_if_callable(v, data)
# preserve order for 3.6 and later, but sort by key for 3.5 and earlier
if PY36:
results = results.items()
else:
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# upcast the scalar
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, value.dtype)
# return internal types directly
if is_extension_type(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
Returns
-------
values : ndarray
The found values
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
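# Hedged lookup sketch: the row- and column-label sequences must be the same
# length, and the returned array lines up element-wise with them:
#
#   >>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]}, index=['a', 'b', 'c'])
#   >>> df.lookup(['a', 'c'], ['y', 'x'])
#   array([4, 3])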
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level,
fill_value=np.nan, limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
@rewrite_axis_style_signature('labels', [('method', None),
('copy', True),
('level', None),
('fill_value', np.nan),
('limit', None),
('tolerance', None)])
def reindex(self, *args, **kwargs):
axes = validate_axis_style_args(self, args, kwargs, 'labels',
'reindex')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('labels', None)
return super(DataFrame, self).reindex(**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
@rewrite_axis_style_signature('mapper', [('copy', True),
('inplace', False),
('level', None)])
def rename(self, *args, **kwargs):
"""Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper, index, columns : dict-like or function, optional
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
axis : int or str, optional
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : DataFrame
See Also
--------
pandas.DataFrame.rename_axis
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('mapper', None)
return super(DataFrame, self).rename(**kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale':[55, 40, 84, 31]})
month sale year
0 1 55 2012
1 4 40 2014
2 7 84 2013
3 10 31 2014
Set the index to become the 'month' column:
>>> df.set_index('month')
sale year
month
1 55 2012
4 40 2014
7 84 2013
10 31 2014
Create a multi-index using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a multi-index using a set of values and a column:
>>> df.set_index([[1, 2, 3, 4], 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Returns
-------
dataframe : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, Series):
level = col._values
names.append(col.name)
elif isinstance(col, Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = _ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
reset : DataFrame
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.asobject.values
elif isinstance(index, DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
new_index = _default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if isinstance(self.index, MultiIndex):
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(DataFrame, self).isna()
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(DataFrame, self).isnull()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(DataFrame, self).notna()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(DataFrame, self).notnull()
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Return object with labels on given axis omitted where any
or all of the data are missing, depending on ``how``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, or tuple/list thereof
Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
int value : require that many non-NA values
subset : array-like
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include
inplace : boolean, default False
If True, do operation inplace and return None.
Returns
-------
dropped : DataFrame
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Drop the columns where all elements are nan:
>>> df.dropna(axis=1, how='all')
A B D
0 NaN 2.0 0
1 3.0 4.0 1
2 NaN NaN 5
Drop the columns where any of the elements is nan
>>> df.dropna(axis=1, how='any')
D
0 0
1 1
2 5
Drop the rows where all of the elements are nan
(there is no row to drop, so df stays the same):
>>> df.dropna(axis=0, how='all')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
Keep only the rows with at least 2 non-na values:
>>> df.dropna(thresh=2)
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
result = self._take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
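# Hedged drop_duplicates sketch: subset restricts which columns define a
# duplicate, and keep picks which occurrence survives:
#
#   >>> df = pd.DataFrame({'k': [1, 1, 2], 'v': ['a', 'b', 'c']})
#   >>> df.drop_duplicates(subset='k', keep='last')
#      k  v
#   1  1  b
#   2  2  c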
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
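# Hedged duplicated sketch: the returned boolean Series is aligned with the
# frame's index, so it can be used directly as a mask:
#
#   >>> df = pd.DataFrame({'k': [1, 1, 2]})
#   >>> df.duplicated('k')            # the first occurrence is not flagged
#   0    False
#   1     True
#   2    False
#   dtype: bool
#   >>> df[~df.duplicated('k')]       # equivalent to drop_duplicates('k')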
# ----------------------------------------------------------------------
# Sorting
@Appender(_shared_docs['sort_values'] % _shared_doc_kwargs)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
other_axis = 0 if axis == 1 else 1
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = []
for x in by:
k = self.xs(x, axis=other_axis).values
if k.ndim == 2:
raise ValueError('Cannot sort by duplicate column %s' %
str(x))
keys.append(k)
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = _ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self.xs(by, axis=other_axis).values
if k.ndim == 2:
# try to be helpful
if isinstance(self.columns, MultiIndex):
raise ValueError('Cannot sort by column %s in a '
'multi-index you need to explicitly '
'provide all the levels' % str(by))
raise ValueError('Cannot sort by duplicate column %s' %
str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
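    # Illustrative sketch (not part of the original source): sorting by several
    # keys with mixed directions; the frame below is hypothetical.
    #
    # >>> df = pd.DataFrame({'a': [2, 1, 1], 'b': [1, 2, 3]})
    # >>> df.sort_values(by=['a', 'b'], ascending=[True, False])
    #    a  b
    # 2  1  3
    # 1  1  2
    # 0  2  1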
@Appender(_shared_docs['sort_index'] % _shared_doc_kwargs)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
        # GH 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, "
"please use .sort_values(by=...)",
FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""
DEPRECATED: use :meth:`DataFrame.sort_index`
Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` largest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nlargest(3, 'a')
            a  b    c
        3  11  c  3.0
        1  10  b  2.0
        2   8  d  NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
.. versionadded:: 0.17.0
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Examples
--------
>>> df = DataFrame({'a': [1, 10, 8, 11, -1],
... 'b': list('abdce'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0]})
>>> df.nsmallest(3, 'a')
            a  b    c
        4  -1  e  4.0
        0   1  a  1.0
        2   8  d  NaN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
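    # Illustrative sketch (not part of the original source): swaplevel and
    # reorder_levels on a hypothetical two-level index.
    #
    # >>> idx = pd.MultiIndex.from_tuples([('x', 1), ('y', 2)],
    # ...                                 names=['outer', 'inner'])
    # >>> df = pd.DataFrame({'v': [10, 20]}, index=idx)
    # >>> df.swaplevel('outer', 'inner').index.names          # ['inner', 'outer']
    # >>> df.reorder_levels(['inner', 'outer']).index.names   # same result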
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None,
try_cast=True):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
if fill_value is not None:
left_mask = isna(left)
right_mask = isna(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return func(left, right)
if this._is_mixed_type or other._is_mixed_type:
# unique
if this.columns.is_unique:
def f(col):
r = _arith_op(this[col].values, other[col].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([(col, f(col)) for col in this])
# non-unique
else:
def f(i):
r = _arith_op(this.iloc[:, i].values,
other.iloc[:, i].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([
(i, f(i)) for i, col in enumerate(this.columns)
])
result = self._constructor(result, index=new_index, copy=False)
result.columns = new_columns
return result
else:
result = _arith_op(this.values, other.values)
return self._constructor(result, index=new_index, columns=new_columns,
copy=False)
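    # Illustrative sketch (not part of the original source): the fill_value
    # handling in _arith_op above only fills positions that are missing in
    # exactly one of the two frames; positions missing in both stay NaN.
    #
    # >>> a = pd.DataFrame({'x': [1.0, np.nan, np.nan]})
    # >>> b = pd.DataFrame({'x': [np.nan, 2.0, np.nan]})
    # >>> a.add(b, fill_value=0)        # x -> [1.0, 2.0, NaN]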
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None, try_cast=True):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
return self._combine_match_index(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
else:
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
return self._combine_series_infer(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
def _combine_series_infer(self, other, func, level=None,
fill_value=None, try_cast=True):
if len(other) == 0:
return self * np.nan
if len(self) == 0:
# Ambiguous case, use _series so works with DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
return self._combine_match_columns(other, func, level=level,
fill_value=fill_value,
try_cast=try_cast)
def _combine_match_index(self, other, func, level=None,
fill_value=None, try_cast=True):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported." %
fill_value)
return self._constructor(func(left.values.T, right.values).T,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None,
fill_value=None, try_cast=True):
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported" %
fill_value)
new_data = left._data.eval(func=func, other=right,
axes=[left.columns, self.index],
try_cast=try_cast)
return self._constructor(new_data)
def _combine_const(self, other, func, errors='raise', try_cast=True):
new_data = self._data.eval(func=func, other=other,
errors=errors,
try_cast=try_cast)
return self._constructor(new_data)
def _compare_frame_evaluate(self, other, func, str_rep, try_cast=True):
import pandas.core.computation.expressions as expressions
# unique
if self.columns.is_unique:
def _compare(a, b):
return dict([(col, func(a[col], b[col])) for col in a.columns])
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
def _compare(a, b):
return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))
for i, col in enumerate(a.columns)])
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
result.columns = self.columns
return result
def _compare_frame(self, other, func, str_rep, try_cast=True):
if not self._indexed_same(other):
raise ValueError('Can only compare identically-labeled '
'DataFrame objects')
return self._compare_frame_evaluate(other, func, str_rep,
try_cast=try_cast)
def _flex_compare_frame(self, other, func, str_rep, level, try_cast=True):
if not self._indexed_same(other):
self, other = self.align(other, 'outer', level=level, copy=False)
return self._compare_frame_evaluate(other, func, str_rep,
try_cast=try_cast)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
        Perform a column-wise combine with another DataFrame using `func`, and
        do not propagate NaN values: if for a (column, time) one frame is
        missing a value, it will default to the other frame's value (which
        might be NaN as well)
Parameters
----------
other : DataFrame
func : function
Function that takes two series as inputs and return a Series or a
scalar
fill_value : scalar value
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
Returns
-------
result : DataFrame
Examples
--------
>>> df1 = DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
A B
0 0 3
1 0 3
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
            # don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
            # if we have different dtypes, possibly promote
new_dtype = this_dtype
if not is_dtype_equal(this_dtype, other_dtype):
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
# see if we need to be represented as i8 (datetimelike)
# try to keep us at this dtype
needs_i8_conversion_i = needs_i8_conversion(new_dtype)
if needs_i8_conversion_i:
arr = func(series, otherSeries, True)
else:
arr = func(series, otherSeries)
if do_fill:
arr = _ensure_float(arr)
arr[this_mask & other_mask] = np.nan
# try to downcast back to the original dtype
if needs_i8_conversion_i:
# ToDo: This conversion should be handled in
# _maybe_cast_to_datetime but the change affects lot...
if is_datetime64tz_dtype(new_dtype):
arr = DatetimeIndex._simple_new(arr, tz=new_dtype.tz)
else:
arr = maybe_cast_to_datetime(arr, new_dtype)
else:
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)._convert(datetime=True,
copy=False)
def combine_first(self, other):
"""
Combine two DataFrame objects and default to non-null values in frame
calling the method. Result index columns will be the union of the
respective indexes and columns
Parameters
----------
other : DataFrame
Returns
-------
combined : DataFrame
Examples
--------
df1's values prioritized, use values from df2 to fill holes:
>>> df1 = pd.DataFrame([[1, np.nan]])
>>> df2 = pd.DataFrame([[3, 4]])
>>> df1.combine_first(df2)
0 1
0 1 4.0
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function
"""
import pandas.core.computation.expressions as expressions
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
if needs_i8_conversion:
mask = isna(x)
x_values = x_values.view('i8')
y_values = y_values.view('i8')
else:
mask = isna(x_values)
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify DataFrame in place using non-NA values from passed
DataFrame. Aligns on indices
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
join : {'left'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : boolean
If True, will raise an error if the DataFrame and other both
contain data in the same place.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If ``other`` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if raise_conflict:
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
            # don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
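    # Illustrative sketch (not part of the original source): effect of the
    # ``overwrite`` and ``filter_func`` arguments, using a hypothetical frame.
    #
    # >>> df = pd.DataFrame({'A': [1.0, np.nan, 3.0]})
    # >>> other = pd.DataFrame({'A': [10.0, 20.0, 30.0]})
    # >>> tmp = df.copy(); tmp.update(other, overwrite=False)
    # >>> tmp                                  # only the NaN at row 1 changed
    # >>> df.update(other, filter_func=lambda x: x > 2)
    # >>> df                                   # only row 2 was replaced (by 30.0)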
# ----------------------------------------------------------------------
# Misc methods
def _get_valid_indices(self):
is_valid = self.count(1) > 0
return self.index[is_valid]
@Appender(_shared_docs['valid_index'] % {
'position': 'first', 'klass': 'DataFrame'})
def first_valid_index(self):
if len(self) == 0:
return None
valid_indices = self._get_valid_indices()
return valid_indices[0] if len(valid_indices) else None
@Appender(_shared_docs['valid_index'] % {
'position': 'last', 'klass': 'DataFrame'})
def last_valid_index(self):
if len(self) == 0:
return None
valid_indices = self._get_valid_indices()
return valid_indices[-1] if len(valid_indices) else None
# ----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from index / columns to form axes of the resulting
DataFrame.
Parameters
----------
index : string or object, optional
Column name to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
Column name to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns
Returns
-------
pivoted : DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
DataFrame.unstack : pivot based on the index values instead of a
column
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods
Examples
--------
>>> df = pd.DataFrame({'foo': ['one','one','one','two','two','two'],
        ...                    'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
        ...                    'baz': [1, 2, 3, 4, 5, 6]})
>>> df
foo bar baz
0 one A 1
1 one B 2
2 one C 3
3 two A 4
4 two B 5
5 two C 6
>>> df.pivot(index='foo', columns='bar', values='baz')
A B C
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
A B C
one 1 2 3
two 4 5 6
"""
from pandas.core.reshape.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs['pivot_table'] = """
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
        it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
        it is used in the same manner as column values.
aggfunc : function or list of functions, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
        Add all row / column totals (i.e. subtotals and grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
margins_name : string, default 'All'
Name of the row / column that will contain the totals
when margins is True.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]})
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
... # doctest: +NORMALIZE_WHITESPACE
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
Returns
-------
table : DataFrame
See also
--------
DataFrame.pivot : pivot without aggregation that can handle
non-numeric data
"""
@Substitution('')
@Appender(_shared_docs['pivot_table'])
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
from pandas.core.reshape.pivot import pivot_table
return pivot_table(self, values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value,
margins=margins, dropna=dropna,
margins_name=margins_name)
def stack(self, level=-1, dropna=True):
"""
Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to stack, can pass level name
dropna : boolean, default True
Whether to drop rows in the resulting Frame/Series with no valid
values
Examples
        --------
>>> s
a b
one 1. 2.
two 3. 4.
>>> s.stack()
one a 1
b 2
two a 3
b 4
Returns
-------
stacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
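    # Illustrative sketch (not part of the original source): stack and unstack
    # are inverses for a fully populated frame; the data here is hypothetical.
    #
    # >>> df = pd.DataFrame([[1, 2], [3, 4]],
    # ...                   index=['one', 'two'], columns=['a', 'b'])
    # >>> s = df.stack()          # MultiIndex Series: ('one', 'a') -> 1, ...
    # >>> s.unstack()             # recovers the original 2x2 frame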
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
            .. versionadded:: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
"Unpivots" a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.reshape import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
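    # Illustrative sketch (not part of the original source): melt followed by
    # pivot round-trips a simple wide frame (hypothetical data).
    #
    # >>> wide = pd.DataFrame({'id': [1, 2], 'x': [10, 20], 'y': [30, 40]})
    # >>> long = wide.melt(id_vars=['id'], value_vars=['x', 'y'])
    # >>> long.pivot(index='id', columns='variable', values='value')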
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
        First discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
            .. versionadded:: 0.16.1
Returns
-------
diffed : DataFrame
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
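    # Illustrative sketch (not part of the original source): diff over rows vs.
    # columns on a hypothetical frame.
    #
    # >>> df = pd.DataFrame({'a': [1, 3, 6], 'b': [2, 2, 2]})
    # >>> df.diff()            # row-wise: a -> [NaN, 2, 3], b -> [NaN, 0, 0]
    # >>> df.diff(axis=1)      # column-wise: column b becomes b - a per row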
# ----------------------------------------------------------------------
# Function application
def _gotitem(self, key, ndim, subset=None):
"""
        Sub-classes should define this to
        return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
# TODO: _shallow_copy(subset)?
return self[key]
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
... index=pd.date_range('1/1/2000', periods=10))
>>> df.iloc[3:7] = np.nan
Aggregate these functions across all columns
>>> df.agg(['sum', 'min'])
A B C
sum -0.182253 -0.614014 -2.909534
min -1.916563 -1.460076 -1.568297
Different aggregations per column
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 1.514318
min -1.916563 -1.460076
sum -0.182253 NaN
See also
--------
pandas.DataFrame.apply
pandas.DataFrame.transform
pandas.DataFrame.groupby.aggregate
pandas.DataFrame.resample.aggregate
pandas.DataFrame.rolling.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
# TODO: flipped axis
result = None
if axis == 0:
try:
result, how = self._aggregate(func, axis=0, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
agg = aggregate
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""
Applies function along input axis of DataFrame.
Objects passed to functions are Series objects having index
either the DataFrame's index (axis=0) or the columns (axis=1).
Return type depends on whether passed function aggregates, or the
reduce argument if the DataFrame is empty.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index': apply function to each column
* 1 or 'columns': apply function to each row
broadcast : boolean, default False
For aggregation functions, return object of same size with values
propagated
raw : boolean, default False
If False, convert each row or column into a Series. If raw=True the
passed function will receive ndarray objects instead. If you are
just applying a NumPy reduction function this will achieve much
better performance
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
            return value will be guessed by calling func on an empty Series
            (note: while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
args : tuple
Positional arguments to pass to function in addition to the
array/series
Additional keyword arguments will be passed as keywords to the function
Notes
-----
In the current implementation apply calls func twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df.apply(numpy.sqrt) # returns DataFrame
>>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
>>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)
See also
--------
DataFrame.applymap: For elementwise operations
DataFrame.aggregate: only perform aggregating type operations
        DataFrame.transform: only perform transforming type operations
Returns
-------
applied : Series or DataFrame
"""
axis = self._get_axis_number(axis)
ignore_failures = kwds.pop('ignore_failures', False)
# dispatch to agg
if axis == 0 and isinstance(func, (list, dict)):
return self.aggregate(func, axis=axis, *args, **kwds)
if len(self.columns) == 0 and len(self.index) == 0:
return self._apply_empty_result(func, axis, reduce, *args, **kwds)
# if we are a string, try to dispatch
if isinstance(func, compat.string_types):
if axis:
kwds['axis'] = axis
return getattr(self, func)(*args, **kwds)
if kwds or args and not isinstance(func, np.ufunc):
def f(x):
return func(x, *args, **kwds)
else:
f = func
if isinstance(f, np.ufunc):
with np.errstate(all='ignore'):
results = f(self.values)
return self._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
else:
if not broadcast:
if not all(self.shape):
return self._apply_empty_result(func, axis, reduce, *args,
**kwds)
if raw and not self._is_mixed_type:
return self._apply_raw(f, axis)
else:
if reduce is None:
reduce = True
return self._apply_standard(
f, axis,
reduce=reduce,
ignore_failures=ignore_failures)
else:
return self._apply_broadcast(f, axis)
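    # Illustrative sketch (not part of the original source): the raw / reduce
    # paths described above, on a hypothetical numeric frame.
    #
    # >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    # >>> df.apply(np.sum)                 # reducing func -> Series: a=3, b=7
    # >>> df.apply(np.sum, raw=True)       # same result, ndarray fast path
    # >>> df.apply(lambda x: x * 2)        # non-reducing func -> DataFrame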
def _apply_empty_result(self, func, axis, reduce, *args, **kwds):
if reduce is None:
reduce = False
try:
reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),
Series)
except Exception:
pass
if reduce:
return Series(np.nan, index=self._get_agg_axis(axis))
else:
return self.copy()
def _apply_raw(self, func, axis):
try:
result = lib.reduce(self.values, func, axis=axis)
except Exception:
result = np.apply_along_axis(func, axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return DataFrame(result, index=self.index, columns=self.columns)
else:
return Series(result, index=self._get_agg_axis(axis))
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if (reduce and axis == 1 and self._is_mixed_type and
self._is_datelike_mixed_type):
reduce = False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
values = self.values
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if not is_extension_type(values):
# Create a dummy Series from an empty array
index = self._get_axis(axis)
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=self._get_axis(axis),
dtype=values.dtype)
try:
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy,
labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self._ixs(i, axis=1)
for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (Series.from_array(arr, index=res_columns, name=name,
dtype=dtype)
for i, (arr, name) in enumerate(zip(values,
res_index)))
        else:  # pragma: no cover
raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
pprint_thing(k), )
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result._convert(datetime=True, timedelta=True, copy=False)
else:
result = Series(results)
result.index = res_index
return result
def _apply_broadcast(self, func, axis):
if axis == 0:
target = self
elif axis == 1:
target = self.T
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1, got %s' % axis)
result_values = np.empty_like(target.values)
columns = target.columns
for i, col in enumerate(columns):
result_values[:, i] = func(target[col])
result = self._constructor(result_values, index=target.index,
columns=target.columns)
if axis == 1:
result = result.T
return result
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Examples
--------
>>> df = pd.DataFrame(np.random.randn(3, 3))
>>> df
0 1 2
0 -0.029638 1.081563 1.280300
1 0.647747 0.831136 -1.549481
2 0.513416 -0.884417 0.195343
>>> df = df.applymap(lambda x: '%.2f' % x)
>>> df
0 1 2
0 -0.03 1.08 1.28
1 0.65 0.83 -1.55
2 0.51 -0.88 0.20
Returns
-------
applied : DataFrame
See also
--------
DataFrame.apply : For operations on rows/columns
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.asobject, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
"""
Append rows of `other` to the end of this frame, returning a new
object. Columns not in this frame are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
column. Efficiently Join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : column name, tuple/list of column names, or array-like
Column(s) in the caller to join on the index in other,
            otherwise joins index-on-index. If multiple
            columns are given, the passed DataFrame must have a MultiIndex. Can
pass an array as the join key if not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographically
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index, preserving the order
              of the calling frame's index
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword)
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
A key
0 A0 K0
1 A1 K1
2 A2 K2
3 A3 K3
4 A4 K4
5 A5 K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
B key
0 B0 K0
1 B1 K1
2 B2 K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
            A key_caller    B key_other
0 A0 K0 B0 K0
1 A1 K1 B1 K1
2 A2 K2 B2 K2
3 A3 K3 NaN NaN
4 A4 K4 NaN NaN
5 A5 K5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
              A    B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
            A key     B
0 A0 K0 B0
1 A1 K1 B1
2 A2 K2 B2
3 A3 K3 NaN
4 A4 K4 NaN
5 A5 K5 NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
# join indexes only using concat
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
if can_concat:
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
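    # Illustrative sketch (not part of the original source): joining a list of
    # frames goes through the concat path above when all indexes are unique.
    #
    # >>> left = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
    # >>> others = [pd.DataFrame({'b': [3]}, index=['x']),
    # ...           pd.DataFrame({'c': [4]}, index=['y'])]
    # >>> left.join(others)      # columns a, b, c aligned on left's index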
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
from pandas.core.reshape.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator, validate=validate)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
.. versionadded:: 0.17.0
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1 0.17
second 0.0 1 0.58
third 0.9 0 0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(_ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(_ensure_float64(mat),
minp=min_periods)
else:
if min_periods is None:
min_periods = 1
mat = _ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
y : DataFrame
Notes
-----
`y` contains the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1 (unbiased estimator).
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(_ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
"""
axis = self._get_axis_number(axis)
if isinstance(other, Series):
return self.apply(other.corr, axis=axis)
this = self._get_numeric_data()
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
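    # Illustrative sketch (not part of the original source): corrwith matches
    # columns by label, so only shared columns produce a number.
    #
    # >>> df1 = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 0, 1]})
    # >>> df2 = pd.DataFrame({'a': [2, 4, 6], 'c': [1, 1, 1]})
    # >>> df1.corrwith(df2)      # a -> 1.0, b and c -> NaN (no overlap)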
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Return Series with number of non-NA/null observations over requested
axis. Works with non-floating point data as well (detects NaN and None)
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame
numeric_only : boolean, default False
Include only float, int, boolean data
Returns
-------
count : Series (or DataFrame if level specified)
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type:
result = notna(frame).sum(axis=axis)
else:
counts = notna(frame.values).sum(axis=axis)
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical %s." %
self._get_axis_name(axis))
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notna(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = _ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
axis = self._get_axis_number(axis)
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
labels = self._get_agg_axis(axis)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
result = self.apply(f, reduce=False,
ignore_failures=True)
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError("Handling exception with filter_"
"type %s not implemented." %
filter_type)
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type %s"
"not supported." % filter_type)
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notna(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
return Series(result, index=labels)
def nunique(self, axis=0, dropna=True):
"""
Return Series with number of distinct observations over requested
axis.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique : Series
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
>>> df.nunique(axis=1)
0 1
1 2
2 2
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
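    # Illustrative sketch (not part of the original source): idxmin / idxmax
    # return labels, not positions, and break ties by first occurrence.
    #
    # >>> df = pd.DataFrame({'a': [3, 1, 1]}, index=['r0', 'r1', 'r2'])
    # >>> df.idxmin()      # a -> 'r1' (first of the tied minima)
    # >>> df.idxmax()      # a -> 'r0'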
def _get_agg_axis(self, axis_num):
""" let's be explict about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
"""
Gets the mode(s) of each element along the axis selected. Adds a row
for each mode per label, fills in gaps with nan.
Note that there could be multiple values returned for the selected
axis (when more than one item share the maximum frequency), which is
the reason why a dataframe is returned. If you want to impute missing
values with the mode in a dataframe ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
Returns
-------
modes : DataFrame (sorted)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
>>> df.mode()
A
0 1
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode()
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis, a la
numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
ts : TimeSeries with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
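# Editorial sketch (not part of the original source) of the round trip between
# a DatetimeIndex and a PeriodIndex; the frame below is hypothetical:
#
#   >>> df = pd.DataFrame({'v': [1, 2, 3]},
#   ...                   index=pd.date_range('2000-01-01', periods=3, freq='M'))
#   >>> pdf = df.to_period()                    # monthly PeriodIndex inferred
#   >>> pdf.to_timestamp(how='start').index[0]  # back to the period start
#   Timestamp('2000-01-01 00:00:00')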
def isin(self, values):
"""
Return boolean DataFrame showing whether each element in the
DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dictionary
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dictionary, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame of booleans
Examples
--------
When ``values`` is a list:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df.isin([1, 3, 12, 'a'])
A B
0 True True
1 False False
2 True False
When ``values`` is a dict:
>>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
>>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
A B
0 True False # Note that B didn't match the 1 here.
1 False True
2 True True
When ``values`` is a Series or DataFrame:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
>>> df.isin(other)
A B
0 True False
1 False False # Column A in `other` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = accessor.AccessorProperty(gfx.FramePlotMethods,
gfx.FramePlotMethods)
hist = gfx.hist_frame
boxplot = gfx.boxplot_frame
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)
_EMPTY_SERIES = Series([])
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = _ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = _default_index(lengths[0])
return _ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = _default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = _default_index(len(data))
index = _ensure_index(index)
if columns is not None:
columns = _ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = _ensure_index(arr_columns).get_indexer(columns)
arr_columns = _ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_objs_combined_axis
if columns is None:
columns = _get_objs_combined_axis(data)
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = _default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = _values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('%d columns passed, passed data had %s '
'columns' % (len(columns), len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any([getattr(s, 'name', None) is not None for s in data])
if not has_some_name:
return _default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = _dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=np.nan)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
| mit |
kubeflow/pipelines | backend/src/apiserver/visualization/types/table.py | 1 | 1680 | # Copyright 2019 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# gcsfs is required for pandas GCS integration.
import gcsfs
from itables import show
# itables is required as importing it changes the way pandas DataFrames are
# rendered.
import itables.interactive
from itables.javascript import load_datatables
import itables.options as opts
import pandas as pd
from tensorflow.python.lib.io import file_io
# Forcefully load required JavaScript and CSS for datatables.
load_datatables()
# Remove maxByte limit to prevent issues where entire table cannot be rendered
# due to size of data.
opts.maxBytes = 0
dfs = []
files = file_io.get_matching_files(source)
# Read data from file and write it to a DataFrame object.
if not variables.get("headers", False):
# If no headers are provided, use the first row as headers
for f in files:
dfs.append(pd.read_csv(f))
else:
# If headers are provided, do not set headers for DataFrames
for f in files:
dfs.append(pd.read_csv(f, header=None))
# Display DataFrame as output.
df = pd.concat(dfs)
if variables.get("headers", False):
df.columns = variables.get("headers")
show(df)
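# Editorial note (assumption, not part of the original file): ``source`` and
# ``variables`` are not defined in this snippet; the visualization service is
# expected to inject them before execution, e.g. roughly
#   source = 'gs://your-bucket/path/to/table-*.csv'   # hypothetical path
#   variables = {'headers': ['col_a', 'col_b']}       # optional column names
# where ``headers``, when present, names the columns of headerless CSV files.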
| apache-2.0 |
mixturemodel-flow/tensorflow | tensorflow/python/estimator/inputs/pandas_io.py | 86 | 4503 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=None,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object. `None` if absent.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: Integer, number of threads used for reading and enqueueing. In
order to have a predictable and repeatable order of reading and enqueueing,
such as in prediction and evaluation mode, `num_threads` should be 1.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
TypeError: `shuffle` is not bool.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
if not isinstance(shuffle, bool):
raise TypeError('shuffle must be explicitly set as boolean; '
'got {}'.format(shuffle))
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
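# Editorial usage sketch (not part of the original module); ``features_df``,
# ``labels`` and ``estimator`` are hypothetical objects, with ``labels``
# sharing the index of ``features_df``:
#
#   train_input_fn = pandas_input_fn(
#       x=features_df, y=labels, batch_size=32, num_epochs=10, shuffle=True)
#   estimator.train(input_fn=train_input_fn)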
| apache-2.0 |
gfyoung/pandas | pandas/tests/tseries/holiday/test_observance.py | 7 | 2723 | from datetime import datetime
import pytest
from pandas.tseries.holiday import (
after_nearest_workday,
before_nearest_workday,
nearest_workday,
next_monday,
next_monday_or_tuesday,
next_workday,
previous_friday,
previous_workday,
sunday_to_monday,
weekend_to_monday,
)
_WEDNESDAY = datetime(2014, 4, 9)
_THURSDAY = datetime(2014, 4, 10)
_FRIDAY = datetime(2014, 4, 11)
_SATURDAY = datetime(2014, 4, 12)
_SUNDAY = datetime(2014, 4, 13)
_MONDAY = datetime(2014, 4, 14)
_TUESDAY = datetime(2014, 4, 15)
_NEXT_WEDNESDAY = datetime(2014, 4, 16)
@pytest.mark.parametrize("day", [_SATURDAY, _SUNDAY])
def test_next_monday(day):
assert next_monday(day) == _MONDAY
@pytest.mark.parametrize(
"day,expected", [(_SATURDAY, _MONDAY), (_SUNDAY, _TUESDAY), (_MONDAY, _TUESDAY)]
)
def test_next_monday_or_tuesday(day, expected):
assert next_monday_or_tuesday(day) == expected
@pytest.mark.parametrize("day", [_SATURDAY, _SUNDAY])
def test_previous_friday(day):
assert previous_friday(day) == _FRIDAY
def test_sunday_to_monday():
assert sunday_to_monday(_SUNDAY) == _MONDAY
@pytest.mark.parametrize(
"day,expected", [(_SATURDAY, _FRIDAY), (_SUNDAY, _MONDAY), (_MONDAY, _MONDAY)]
)
def test_nearest_workday(day, expected):
assert nearest_workday(day) == expected
@pytest.mark.parametrize(
"day,expected", [(_SATURDAY, _MONDAY), (_SUNDAY, _MONDAY), (_MONDAY, _MONDAY)]
)
def test_weekend_to_monday(day, expected):
assert weekend_to_monday(day) == expected
@pytest.mark.parametrize(
"day,expected",
[
(_WEDNESDAY, _THURSDAY),
(_THURSDAY, _FRIDAY),
(_SATURDAY, _MONDAY),
(_SUNDAY, _MONDAY),
(_MONDAY, _TUESDAY),
(_TUESDAY, _NEXT_WEDNESDAY), # WED is same week as TUE
],
)
def test_next_workday(day, expected):
assert next_workday(day) == expected
@pytest.mark.parametrize(
"day,expected", [(_SATURDAY, _FRIDAY), (_SUNDAY, _FRIDAY), (_TUESDAY, _MONDAY)]
)
def test_previous_workday(day, expected):
assert previous_workday(day) == expected
@pytest.mark.parametrize(
"day,expected",
[
(_THURSDAY, _WEDNESDAY),
(_FRIDAY, _THURSDAY),
(_SATURDAY, _THURSDAY),
(_SUNDAY, _FRIDAY),
(_MONDAY, _FRIDAY), # last week Friday
(_TUESDAY, _MONDAY),
(_NEXT_WEDNESDAY, _TUESDAY), # WED is same week as TUE
],
)
def test_before_nearest_workday(day, expected):
assert before_nearest_workday(day) == expected
@pytest.mark.parametrize(
"day,expected", [(_SATURDAY, _MONDAY), (_SUNDAY, _TUESDAY), (_FRIDAY, _MONDAY)]
)
def test_after_nearest_workday(day, expected):
assert after_nearest_workday(day) == expected
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Padova_cont/padova_cont_5/fullgrid/UV2.py | 31 | 9339 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [18, #1549
19, #1640
20, #1665
21, #1671
23, #1750
24, #1860
25, #1888
26, #1907
27, #2297
28, #2321
29, #2471
30, #2326
31, #2335
32, #2665
33, #2798
34] #2803
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty UV Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_UV_Lines_cntd.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
HoliestCow/ece692_deeplearning | project5/gru/cnn4gruDETandSID.py | 1 | 12966 |
# Sample code implementing LeNet-5 from Liu Liu
import tensorflow as tf
import numpy as np
import time
import h5py
# import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from copy import deepcopy
import os
import os.path
import cPickle as pickle
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
class cnnMNIST(object):
def __init__(self):
self.use_gpu = True
self.lr = 1e-3
self.epochs = 1000
self.runname = 'cnndetandsid_{}'.format(self.epochs)
self.dataset_filename = 'sequential_dataset_relabel_60seconds.h5'
self.build_graph()
def onehot_labels(self, labels):
out = np.zeros((labels.shape[0], 7))
for i in range(labels.shape[0]):
out[i, :] = np.eye(7)[labels[i]]
return out
def onenothot_labels(self, labels):
out = np.zeros((labels.shape[0],))
for i in range(labels.shape[0]):
out[i] = np.argmax(labels[i, :])
return out
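# Editorial note: the two helpers above are inverses of each other over the
# seven label classes, e.g.
#   onehot_labels(np.array([2])) -> [[0., 0., 1., 0., 0., 0., 0.]]
#   onenothot_labels(np.array([[0., 0., 1., 0., 0., 0., 0.]])) -> [2.]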
def get_data(self):
# data_norm = True
# data_augmentation = False
try:
f = h5py.File(self.dataset_filename, 'r')
except:
# f = h5py.File('/home/holiestcow/Documents/2017_fall/ne697_hayward/lecture/datacompetition/sequential_dataset_balanced.h5', 'r')
f = h5py.File('../data/{}'.format(self.dataset_filename), 'r')
training = f['train']
testing = f['test']
training_dataset = []
training_labels = []
for item in training:
training_dataset += [np.array(training[item]['measured_spectra'])]
training_labels += [np.array(training[item]['labels'])]
training_dataset = np.concatenate(training_dataset, axis=0)
training_labels = np.array(training_labels)
training_labels = np.concatenate(training_labels, axis=0)
testing_dataset = []
testing_labels = []
for item in testing:
testing_dataset += [np.array(testing[item]['measured_spectra'])]
testing_labels += [np.array(testing[item]['labels'])]
testing_dataset = np.concatenate(testing_dataset, axis=0)
testing_labels = np.concatenate(testing_labels, axis=0)
self.x_train = training_dataset
self.y_train = self.onehot_labels(training_labels)
self.x_test = testing_dataset
self.y_test = self.onehot_labels(testing_labels)
return
def naive_get_data(self):
# data_norm = True
# data_augmentation = False
f = h5py.File('naive_dataset.h5', 'r')
g = f['training']
X = np.array(g['spectra'])
Y = self.onehot_labels(np.array(g['labels'], dtype=np.int32))
g = f['testing']
X_test = np.array(g['spectra'])
Y_test = self.onehot_labels(np.array(g['labels'], dtype=np.int32))
self.x_train = X
self.y_train = Y
self.x_test = X_test
self.y_test = Y_test
f.close()
return
def batch(self, iterable, n=1, shuffle=True):
if shuffle:
self.shuffle()
# l = len(iterable)
l = iterable.shape[0]
for ndx in range(0, l, n):
data = iterable[ndx:min(ndx + n, l), :]
# normalization = np.linalg.norm(data, 1, axis=1)
# for j in range(data.shape[0]):
# data[j, :] = np.divide(data[j, :], normalization[j])
yield data
def validation_batcher(self):
try:
f = h5py.File(self.dataset_filename, 'r')
except:
f = h5py.File('../data/{}'.format('sequential_dataset_relabel_validationonly.h5'), 'r')
g = f['validate']
samplelist = list(g.keys())
for i in range(len(samplelist)):
data = np.array(g[samplelist[i]])
yield data, samplelist[i]
def build_graph(self):
feature_map1 = 32
feature_map2 = 64
final_hidden_nodes = 512
self.x = tf.placeholder(tf.float32, shape=[None, 1024])
self.y_ = tf.placeholder(tf.float32, shape=[None, 7])
self.keep_prob = tf.placeholder(tf.float32)
x_image = self.hack_1dreshape(self.x)
# define conv-layer variables
W_conv1 = self.weight_variable([1, 9, 1, feature_map1]) # first conv-layer has 32 kernels, size=5
b_conv1 = self.bias_variable([feature_map1])
W_conv2 = self.weight_variable([1, 3, feature_map1, feature_map2])
b_conv2 = self.bias_variable([feature_map2])
# x_image = tf.reshape(self.x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(self.conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = self.max_pool_2x2(h_conv1)
h_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = self.max_pool_2x2(h_conv2)
# W_conv3 = self.weight_variable([1, 3, feature_map2, feature_map3])
# b_conv3 = self.bias_variable([feature_map3])
# W_conv4 = self.weight_variable([1, 3, feature_map3, feature_map4])
# b_conv4 = self.bias_variable([feature_map4])
# h_conv3 = tf.nn.relu(self.conv2d(h_pool2, W_conv3) + b_conv3)
# h_pool3 = self.max_pool_2x2(h_conv3)
# h_conv4 = tf.nn.relu(self.conv2d(h_pool3, W_conv4) + b_conv4)
# h_pool4 = self.max_pool_2x2(h_conv4)
# densely/fully connected layer
W_fc1 = self.weight_variable([256 * feature_map2, final_hidden_nodes])
b_fc1 = self.bias_variable([final_hidden_nodes])
h_pool2_flat = tf.reshape(h_pool2, [-1, 256 * feature_map2])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# W_fc1 = self.weight_variable([64 * feature_map4, final_hidden_nodes])
# b_fc1 = self.bias_variable([final_hidden_nodes])
# h_pool4_flat = tf.reshape(h_pool4, [-1, 64 * feature_map4])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)
# dropout regularization
h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
# linear classifier
W_fc2 = self.weight_variable([final_hidden_nodes, 7])
b_fc2 = self.bias_variable([7])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
self.y_conv = y_conv
# Now I have to weight to logits
# class_weights = tf.constant([0.1, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# class_weights = tf.constant([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
# self.y_conv = tf.multiply(y_conv, class_weights)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
self.train_step = tf.train.AdamOptimizer(self.lr).minimize(cross_entropy)
def shuffle(self):
rng_state = np.random.get_state()
np.random.set_state(rng_state)
np.random.shuffle(self.x_train)
np.random.set_state(rng_state)
np.random.shuffle(self.y_train)
# permutation = np.random.permutation(self.x_train.shape[0])
# self.x_train = self.x_train[permutation, :]
# self.y_train = self.y_train[permutation, :]
return
def train(self):
if self.use_gpu:
# use half of the gpu memory
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
else:
self.sess = tf.Session()
# self.sess = tf.Session()
init = tf.global_variables_initializer()
self.sess.run(init)
self.eval() # creating evaluation
a = time.time()
for i in range(self.epochs):
# batch = mnist.train.next_batch(50)
x_generator = self.batch(self.x_train, n=128)
y_generator = self.batch(self.y_train, n=128)
# print(batch[0].shape)
# print(batch[1].shape)
if i % 100 == 0 and i != 0:
test_acc = self.sess.run(self.accuracy,feed_dict={self.x: self.x_test[:1000, :],
self.y_: self.y_test[:1000, :],
self.keep_prob: 1.0})
train_acc = self.sess.run(self.accuracy, feed_dict={self.x: current_x,
self.y_: current_y,
self.keep_prob: 1.0})
print('step %d, training accuracy %g, testing accuracy %g, elapsed time %f' % (i, train_acc, test_acc, time.time()-a))
current_x = next(x_generator)
current_y = next(y_generator)
self.sess.run([self.train_step], feed_dict={self.x: current_x,
self.y_: current_y,
self.keep_prob: 0.10})
# self.shuffle()
def eval(self):
# self.time_index = np.arange(self.y_conv.get_shape()[0])
self.prediction = tf.argmax(self.y_conv, 1)
truth = tf.argmax(self.y_, 1)
correct_prediction = tf.equal(self.prediction, truth)
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def test_eval(self):
self.eval()
x_generator = self.batch(self.x_test, n=100, shuffle=False)
y_generator = self.batch(self.y_test, n=100, shuffle=False)
test_acc = []
counter = 0
for data in x_generator:
test_acc += [self.sess.run(self.accuracy, feed_dict={
self.x: data, self.y_: next(y_generator), self.keep_prob: 1.0})]
total_test_acc = sum(test_acc) / float(len(test_acc))
print('test accuracy %g' % total_test_acc)
def weight_variable(self, shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(self, shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def hack_1dreshape(self, x):
# expand its dimensionality to fit into conv2d
tensor_expand = tf.expand_dims(x, 1)
tensor_expand = tf.expand_dims(tensor_expand, -1)
return tensor_expand
def conv2d(self, x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(self, x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def get_label_predictions(self):
x_batcher = self.batch(self.x_test, n=1000, shuffle=False)
# y_batcher = self.batch(self.y_test, n=1000, shuffle=False)
predictions = np.zeros((0, 1))
for data in x_batcher:
temp_predictions = self.sess.run(
self.prediction,
feed_dict={self.x: data,
self.keep_prob: 1.0})
temp_predictions = temp_predictions.reshape((temp_predictions.shape[0], 1))
predictions = np.vstack((predictions, temp_predictions))
return predictions
def save_obj(obj, name ):
with open('obj/'+ name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open('obj/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def main():
interest = 'cnndetalt3_relabel_lr0.0001_ep10000_datasequential_dataset_relabel_allseconds.h5_sequential_dataset_relabel_allsecond_py3to2converted'
cnn = cnnMNIST()
a = time.time()
print('Retrieving data')
cnn.get_data()
b = time.time()
print('Built the data in {} s'.format(b-a))
validation_data = cnn.validation_batcher()
a = time.time()
cnn.train()
b = time.time()
print('Training time: {} s'.format(b-a))
cnn.test_eval()
predictions = cnn.get_label_predictions()
predictions_decode = predictions
labels_decode = cnn.onenothot_labels(cnn.y_test)
# np.save('{}_{}_predictions.npy'.format(cnn.runname, cnn.dataset_filename[:-4]), predictions_decode)
# np.save('{}_{}_ground_truth.npy'.format(cnn.runname, cnn.dataset_filename[:-4]), labels_decode)
hits = load_obj('{}_hits'.format(interest))
answers = open('approach3_answers.csv', 'w')
answers.write('RunID,SourceID,SourceTime,Comment\n')
for sample in hits:
key = sample
data = hits[key]
if data['time'] == 0:
answers.write('{},{},{},\n'.format(key, 0, 0))
continue
x = np.array(data['spectra'])
x = x.reshape((1, len(x)))
predictions = cnn.sess.run(
cnn.prediction,
feed_dict = {cnn.x: x,
cnn.keep_prob: 1.0})
t = np.array(data['time'])
if predictions[0] <= 0.5:
answers.write('{}, 0, 0,\n'.format(key))
else:
answers.write('{},{},{},\n'.format(
key, predictions[0], t))
answers.close()
return
main()
| mit |
ilo10/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset into two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
AlexanderFabisch/scikit-learn | sklearn/ensemble/__init__.py | 153 | 1382 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
fyffyt/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of steps in ``C_range`` and
``gamma_range`` will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
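# Editorial note: the normalizer piecewise-linearly maps [vmin, midpoint, vmax]
# onto [0, 0.5, 1], so the midpoint score lands at the centre of the colormap,
# e.g. MidpointNormalize(vmin=0.2, vmax=1.0, midpoint=0.92)(0.92) gives 0.5.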
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
Abraxos/clustering_tsp_solver | tsp.py | 1 | 27673 | from re import compile
from math import sqrt
from collections import defaultdict
from sys import maxsize
from time import clock
from itertools import product, permutations
from sklearn.cluster import KMeans
from sklearn.cluster import Birch
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
from ortools.constraint_solver import pywrapcp
class DistanceMatrix(object):
def __init__(self,matrix, points, high_level=False, gamma=None):
self.high_level = high_level
self.gamma = gamma if gamma else None
self.matrix = matrix
self.points = points
self.num_points = len(matrix)
def num_points(self):
return self.num_points
def distance(self, from_node, to_node):
return self.matrix[from_node][to_node] if not self.high_level else self.matrix[from_node][to_node][0]
def euclidean_distance(x,y):
return sqrt(pow(y[0] - x[0], 2) + pow(y[1] - x[1], 2))
def points_from_file(file_path):
pattern = compile(r'\s*(\d+)\s+([0-9\.e\+]+)\s+([0-9\.e\+]+).*')
points = []
with open(file_path,'r') as f:
for line in f:
m = pattern.match(line)
if m:
points.append((int(m.group(1)) - 1, float(m.group(2)), float(m.group(3))))
return points
def load_matrix(points,gamma=None):
X = 1
Y = 2
gamma = gamma if gamma else None
matrix = {}
for a in points:
matrix[a[0]] = {}
for b in points:
matrix[a[0]][b[0]] = euclidean_distance((a[X],a[Y]),(b[X],b[Y]))
return DistanceMatrix(matrix,points,gamma=gamma)
def load_matrices_from_labels(points, labels):
# This function takes in a list of points as imported from the input file, and
# uses the labels to group them into matrices representing clusters. It
# returns a tuple of the form (high-level distance matrix, dict of label ->
# low-level distance matrix, dict of label -> original point-id mapping).
# This is the function to use AFTER clustering.
# For correctness, the list of labels must match the list of points. It is
# recommended that before running this function you use the following assert:
# (assuming that points, formatted for scikit is named `data`)
# assert(all([p[1] == d[0] and p[2] == d[1] for p,d in zip(points, data)]))
assert(len(points) == len(labels))
clusters = defaultdict(list)
for point,label in zip(points,labels):
clusters[label].append(point)
# Convert the low-level clusters into distance matrices
G = {}
for label,cluster in clusters.items():
G[label] = {}
for i in range(len(cluster)):
c,x,y = cluster[i]
G[label][i] = c
cluster[i] = (i,x,y)
for label in clusters:
clusters[label] = load_matrix(clusters[label],gamma=G[label])
hl_points = list(labels)
hl_matrix = {}
L = set(labels)
for label in L:
hl_matrix[label] = {}
for other_label in L:
if label == other_label:
hl_matrix[label][other_label] = (0.0, label, other_label)
else:
# compute minimal distance edge between the two clusters
min_point = 0
min_other_point = 0
min_distance = maxsize
for i in range(len(labels)):
for j in range(len(labels)):
if labels[i] == label and labels[j] == other_label:
a,b = points[i],points[j]
a,b = (a[1],a[2]),(b[1],b[2])
d = euclidean_distance(a,b)
if d < min_distance:
min_distance = d
min_point = points[i]
min_other_point = points[j]
hl_matrix[label][other_label] = (min_distance,min_point,min_other_point)
hl_distance_matrix = DistanceMatrix(hl_matrix,hl_points,high_level=True)
return hl_distance_matrix, clusters, G
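# Illustrative usage sketch (added for clarity, not called anywhere in this
# module). It assumes the same 'tsps/berlin52.txt' input that the test
# functions below rely on, and it shows the correctness assert recommended in
# the comment inside load_matrices_from_labels.
def _example_load_matrices_from_labels():
    points = points_from_file('tsps/berlin52.txt')
    data = [[p[1], p[2]] for p in points]
    # scikit-learn only sees the coordinates, so confirm the ordering still
    # matches the original point list before grouping by label
    assert(all([p[1] == d[0] and p[2] == d[1] for p, d in zip(points, data)]))
    labels = KMeans(n_clusters=3).fit(data).labels_
    hl_matrix, clusters, G = load_matrices_from_labels(points, labels)
    return hl_matrix, clusters, G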
def cp_tsp_solve(matrix, depot):
if matrix.num_points:
# Set a global parameter.
param = pywrapcp.RoutingParameters()
param.use_light_propagation = False
pywrapcp.RoutingModel.SetGlobalParameters(param)
routing = pywrapcp.RoutingModel(matrix.num_points, 1)
parameters = pywrapcp.RoutingSearchParameters()
# Setting first solution heuristic (cheapest addition).
parameters.first_solution = 'PathCheapestArc'
# Disabling Large Neighborhood Search, comment out to activate it.
parameters.no_lns = True
parameters.no_tsp = False
matrix_callback = matrix.distance
routing.SetArcCostEvaluatorOfAllVehicles(matrix_callback)
routing.SetDepot(depot)
# Solve, returns a solution if any.
assignment = routing.SolveWithParameters(parameters, None)
if assignment:
route_number = 0
node = routing.Start(route_number)
route = []
while not routing.IsEnd(node):
route.append(int(node))
node = assignment.Value(routing.NextVar(node))
route.append(depot)
# Return Cost, Route
if matrix.gamma:
for i in range(len(route)):
route[i] = matrix.gamma[route[i]]
return float(assignment.ObjectiveValue()), route
else:
            print('No solution found.')
else:
return 0.0,[]
def cluster_test(file_path,num_clusters):
points = points_from_file(file_path)
X = [[p[1],p[2]] for p in points]
est = KMeans(n_clusters=num_clusters)
est.fit(X)
labels = est.labels_
CSV = [','.join([str(points[i][0]),str(points[i][1]),str(points[i][2]),str(labels[i])]) for i in range(len(labels))]
return CSV
def compute_depots(clusters, matrix, gamma, per_cluster=False):
"""
This is where we actually map the high-level TSP onto the final TSP solution
by figuring out which nodes should be used as start/endpoints of each cluster
which in turn determines the routes.
"""
args = [clusters[cid].points for cid in sorted(clusters.keys())]
min_D = maxsize
min_R = None
for depot_set in product(*args):
depot_set = [(i, depot_set[i]) for i in range(len(depot_set))]
for depot_route in permutations(depot_set):
c = 0.0
for i in range(len(depot_route) - 1):
gid1, point1 = depot_route[i]
point1 = gamma[gid1][point1[0]]
gid2, point2 = depot_route[i+1]
point2 = gamma[gid2][point2[0]]
c += matrix.distance(point1,point2)
#compute_route_cost(depot_route, matrix, gamma)
if min_D > c:
min_D = c
min_R = depot_route
if not per_cluster:
min_R = [gamma[gid][p[0]] for gid,p in min_R]
return min_R, min_D
def clustered_tsp_solve(points, num_clusters, estimator=None, labels=None, basic=True, depots=None):
clustering_start = clock()
if estimator is not None and labels is None:
X = [[p[1],p[2]] for p in points]
estimator.fit(X)
labels = estimator.labels_
clustering_time = clock() - clustering_start
elif labels is not None and estimator is None:
labels = labels
clustering_time = 0.0
else:
print("The clustering TSP solver requires either a set of labels OR a clustering algorithm, but not both or neither.")
exit()
hl_matrix, clusters, G = load_matrices_from_labels(points,labels)
C = 0.0
R = []
ll_cluster_solve_start = clock()
if depots is not None:
depots = sorted(depots, key=lambda t:t[0])
print(depots)
for label,cluster in clusters.items():
R.append(cp_tsp_solve(cluster,depots[label][1][0]))
C += R[-1][0]
else:
for label,cluster in clusters.items():
R.append(cp_tsp_solve(cluster,0))
C += R[-1][0]
ll_cluster_solve_time = clock() - ll_cluster_solve_start
hl_cluster_solve_start = clock()
hl_cost, hl_route = cp_tsp_solve(hl_matrix,0)
hl_cluster_solve_time = clock() - hl_cluster_solve_start
total_cluster_solve_time = clock() - clustering_start
C += hl_cost
if basic:
return C, R, hl_route
else:
return C, R, hl_route, (clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time)
def cluster_tsp_vs_cp_tsp(file_path,num_clusters):
print("SOLVING: {0} USING {1} CLUSTERS".format(file_path,num_clusters))
optimal_start = clock()
matrix = load_matrix(points_from_file(file_path))
    optimal_cost, route = cp_tsp_solve(matrix,0)
optimal_solve_time = clock() - optimal_start
# print("\nOPTIMAL TSP: COST: {0} RUNTIME: {1}\n\tSOLUTION: {2}".format(optimal_cost, optimal_solve_time, route))
# print("\nOPTIMAL TSP: COST: {0} RUNTIME: {1}".format(optimal_cost, optimal_solve_time))
points = points_from_file(file_path)
cluster_start = clock()
# Do Clustering Here to Generate a set of Labels:
# K-Means Clustering Example
X = [[p[1],p[2]] for p in points]
clustering_start = clock()
est = KMeans(n_clusters=num_clusters)
est.fit(X)
clustering_time = clock() - clustering_start
labels = est.labels_
C = 0.0
cluster_solve_start = clock()
    hl_matrix, clusters, G = load_matrices_from_labels(points,labels)
for label,cluster in clusters.items():
start = clock()
        cost, route = cp_tsp_solve(cluster,0)
end = clock()
C += cost
# print("\nLOW LEVEL SOLUTIONS: CLUSTER: {0} COST: {1} RUNTIME: {2}\n\tSOLUTION: {3}".format(label, cost, (end - start), route))
# print("\nLOW LEVEL SOLUTIONS: CLUSTER: {0} COST: {1} RUNTIME: {2}".format(label, cost, (end - start)))
# print(hl_matrix)
high_level_start = clock()
    cost, route = cp_tsp_solve(hl_matrix,0)
high_level_cluster_solution_time = clock() - high_level_start
cluster_solve_time = clock() - cluster_solve_start
cluster_total_time = clock() - clustering_start
C += cost
# print("\nHIGH LEVEL SOLUTION: COST: {0} RUNTIME: {1}\n\tSOLUTION: {2}".format(cost, (end - start), route))
# print("\nHIGH LEVEL SOLUTION: COST: {0} RUNTIME: {1}".format(cost, (end - start)))
# print("\nTOTAL CLUSTER SOLUTION COST: {0} RUNTIME: {1} ~> {2}% OPTIMAL".format(C, cluster_total_time, optimal_cost / C * 100.0))
# num_cities,optimal_cost,optimal_solve_time,num_clusters,clustering_time,high_level_cluster_solution_time,cluster_solve_time,cluster_total_time,cluster_optimal_cost,cluser_optimality, speedup
return [matrix.num_points, optimal_cost, optimal_solve_time, num_clusters, clustering_time, high_level_cluster_solution_time, cluster_solve_time, cluster_total_time, C, optimal_cost / C, optimal_solve_time / cluster_total_time]
def test_depot_calculation():
points = points_from_file('tsps/berlin52.txt')
matrix = load_matrix(points)
X = [[p[1],p[2]] for p in points]
est = KMeans(n_clusters=2)
est.fit(X)
labels = est.labels_
hl_matrix, clusters, G = load_matrices_from_labels(points,labels)
compute_depots(clusters, matrix, G, per_cluster=True)
def test_birch_with_depot_calculation():
points = points_from_file('tsps/berlin52.txt')
matrix = load_matrix(points)
X = [[p[1],p[2]] for p in points]
est = Birch(n_clusters=3)
est.fit(X)
labels = est.labels_
hl_matrix, clusters, G = load_matrices_from_labels(points,labels)
depots, C = compute_depots(clusters, matrix, G, per_cluster=True)
depots_actual, _ = compute_depots(clusters, matrix, G)
cluster_optimal_cost, R, hl_route = clustered_tsp_solve(points, 3, labels=labels, depots=depots)
cluster_optimal_cost += C
print(depots_actual)
print(R,C)
for depot in depots_actual:
for r in R:
if r[1][0] == depot:
for point in r[1]:
print(matrix.points[point])
print('')
def test_kmeans_clustering():
Results = {}
files = ['tsps/berlin52.txt','tsps/bier127.txt','tsps/a280.txt','tsps/d493.txt',
'tsps/rat575.txt','tsps/d657.txt','tsps/u724.txt','tsps/vm1084.txt',
'tsps/d1291.txt','tsps/d1655.txt',]
for file_path in files:
points = points_from_file(file_path)
optimal_start = clock()
optimal_cost, _ = cp_tsp_solve(load_matrix(points), 0)
optimal_solve_time = clock() - optimal_start
for num_clusters in range(2,21):
# KMeans clustering
estimator = KMeans(n_clusters=num_clusters)
cluster_optimal_cost, R, hl_route, times = clustered_tsp_solve(points, num_clusters, estimator, basic=False)
clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time = times
Results[(file_path,num_clusters)] = ','.join(str(e) for e in [len(points), optimal_cost, optimal_solve_time, num_clusters, clustering_time, hl_cluster_solve_time,
ll_cluster_solve_time, total_cluster_solve_time, cluster_optimal_cost,
optimal_cost / cluster_optimal_cost, optimal_solve_time / total_cluster_solve_time])
print("CLUSTERING TSP SOLUTION({0},{7}) COST: {1}\n\tHIGH-LEVEL ROUTE: {2}\n\tTIME SPENT CLUSTERING: {3}\n\tTIME SPENT SOLVING LOW-LEVEL TSPS: {4}\n\tTIME SPENT SOLVING HIGH-LEVEL TSP: {5}\n\tTIME SPENT TOTAL: {6}\n".format(file_path, cluster_optimal_cost, hl_route, clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time, num_clusters))
print("Writing Output to CSV")
CSV = ['num_cities,optimal_cost,optimal_solve_time,num_clusters,clustering_time,high_level_cluster_solution_time,cluster_solve_time,cluster_total_time,cluster_optimal_cost,cluser_optimality,speedup']
for file_path in files:
for num_clusters in range(2,21):
CSV.append(Results[(file_path,num_clusters)])
with open('kmeans_results.csv','w+') as csv_file:
csv_file.write('\n'.join(CSV))
def test_birch_clustering():
Results = {}
files = ['tsps/berlin52.txt','tsps/bier127.txt','tsps/a280.txt','tsps/d493.txt',
'tsps/rat575.txt','tsps/d657.txt','tsps/u724.txt','tsps/vm1084.txt',
'tsps/d1291.txt','tsps/d1655.txt',]
for file_path in files:
points = points_from_file(file_path)
optimal_start = clock()
optimal_cost, _ = cp_tsp_solve(load_matrix(points), 0)
optimal_solve_time = clock() - optimal_start
for num_clusters in range(2,21):
# Birch clustering
estimator = Birch(n_clusters=num_clusters)
cluster_optimal_cost, R, hl_route, times = clustered_tsp_solve(points, num_clusters, estimator, basic=False)
clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time = times
Results[(file_path,num_clusters)] = ','.join(str(e) for e in [len(points), optimal_cost, optimal_solve_time, num_clusters, clustering_time, hl_cluster_solve_time,
ll_cluster_solve_time, total_cluster_solve_time, cluster_optimal_cost,
optimal_cost / cluster_optimal_cost, optimal_solve_time / total_cluster_solve_time])
print("CLUSTERING TSP SOLUTION({0},{7}) COST: {1}\n\tHIGH-LEVEL ROUTE: {2}\n\tTIME SPENT CLUSTERING: {3}\n\tTIME SPENT SOLVING LOW-LEVEL TSPS: {4}\n\tTIME SPENT SOLVING HIGH-LEVEL TSP: {5}\n\tTIME SPENT TOTAL: {6}\n".format(file_path, cluster_optimal_cost, hl_route, clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time, num_clusters))
print("Writing Output to CSV")
CSV = ['num_cities,optimal_cost,optimal_solve_time,num_clusters,clustering_time,high_level_cluster_solution_time,cluster_solve_time,cluster_total_time,cluster_optimal_cost,cluser_optimality,speedup']
for file_path in files:
for num_clusters in range(2,21):
CSV.append(Results[(file_path,num_clusters)])
with open('birch_results.csv','w+') as csv_file:
csv_file.write('\n'.join(CSV))
def test_dbscan_clustering():
Results = {}
files = ['tsps/berlin52.txt','tsps/bier127.txt','tsps/a280.txt','tsps/d493.txt',
'tsps/rat575.txt','tsps/d657.txt','tsps/u724.txt','tsps/vm1084.txt',
'tsps/d1291.txt','tsps/d1655.txt',]
for file_path in files:
points = points_from_file(file_path)
optimal_start = clock()
optimal_cost, _ = cp_tsp_solve(load_matrix(points), 0)
optimal_solve_time = clock() - optimal_start
for num_clusters in range(2,21):
# DBSCAN clustering
estimator = DBSCAN(eps=0.3, min_samples=10)
cluster_optimal_cost, R, hl_route, times = clustered_tsp_solve(points, num_clusters, estimator, basic=False)
clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time = times
Results[(file_path,num_clusters)] = ','.join(str(e) for e in [len(points), optimal_cost, optimal_solve_time, num_clusters, clustering_time, hl_cluster_solve_time,
ll_cluster_solve_time, total_cluster_solve_time, cluster_optimal_cost,
optimal_cost / cluster_optimal_cost, optimal_solve_time / total_cluster_solve_time])
print("CLUSTERING TSP SOLUTION({0},{7}) COST: {1}\n\tHIGH-LEVEL ROUTE: {2}\n\tTIME SPENT CLUSTERING: {3}\n\tTIME SPENT SOLVING LOW-LEVEL TSPS: {4}\n\tTIME SPENT SOLVING HIGH-LEVEL TSP: {5}\n\tTIME SPENT TOTAL: {6}\n".format(file_path, cluster_optimal_cost, hl_route, clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time, num_clusters))
print("Writing Output to CSV")
CSV = ['num_cities,optimal_cost,optimal_solve_time,num_clusters,clustering_time,high_level_cluster_solution_time,cluster_solve_time,cluster_total_time,cluster_optimal_cost,cluser_optimality,speedup']
for file_path in files:
for num_clusters in range(2,21):
CSV.append(Results[(file_path,num_clusters)])
with open('dbscan_results.csv','w+') as csv_file:
csv_file.write('\n'.join(CSV))
def test_agglomerative_ward_clustering():
Results = {}
files = ['tsps/berlin52.txt','tsps/bier127.txt','tsps/a280.txt','tsps/d493.txt',
'tsps/rat575.txt','tsps/d657.txt','tsps/u724.txt','tsps/vm1084.txt',
'tsps/d1291.txt','tsps/d1655.txt',]
for file_path in files:
points = points_from_file(file_path)
optimal_start = clock()
optimal_cost, _ = cp_tsp_solve(load_matrix(points), 0)
optimal_solve_time = clock() - optimal_start
for num_clusters in range(2,21):
# DBSCAN clustering
# estimator = DBSCAN(eps=0.3, min_samples=10)
# Agglomerative
estimator = AgglomerativeClustering(n_clusters=num_clusters, affinity='euclidean', linkage='ward')
cluster_optimal_cost, R, hl_route, times = clustered_tsp_solve(points, num_clusters, estimator, basic=False)
clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time = times
Results[(file_path,num_clusters)] = ','.join(str(e) for e in [len(points), optimal_cost, optimal_solve_time, num_clusters, clustering_time, hl_cluster_solve_time,
ll_cluster_solve_time, total_cluster_solve_time, cluster_optimal_cost,
optimal_cost / cluster_optimal_cost, optimal_solve_time / total_cluster_solve_time])
print("CLUSTERING TSP SOLUTION({0},{7}) COST: {1}\n\tHIGH-LEVEL ROUTE: {2}\n\tTIME SPENT CLUSTERING: {3}\n\tTIME SPENT SOLVING LOW-LEVEL TSPS: {4}\n\tTIME SPENT SOLVING HIGH-LEVEL TSP: {5}\n\tTIME SPENT TOTAL: {6}\n".format(file_path, cluster_optimal_cost, hl_route, clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time, num_clusters))
print("Writing Output to CSV")
CSV = ['num_cities,optimal_cost,optimal_solve_time,num_clusters,clustering_time,high_level_cluster_solution_time,cluster_solve_time,cluster_total_time,cluster_optimal_cost,cluser_optimality,speedup']
for file_path in files:
for num_clusters in range(2,21):
CSV.append(Results[(file_path,num_clusters)])
with open('agglom_ward_results.csv','w+') as csv_file:
csv_file.write('\n'.join(CSV))
def test_agglomerative_complete_clustering():
Results = {}
files = ['tsps/berlin52.txt','tsps/bier127.txt','tsps/a280.txt','tsps/d493.txt',
'tsps/rat575.txt','tsps/d657.txt','tsps/u724.txt','tsps/vm1084.txt',
'tsps/d1291.txt','tsps/d1655.txt',]
for file_path in files:
points = points_from_file(file_path)
optimal_start = clock()
optimal_cost, _ = cp_tsp_solve(load_matrix(points), 0)
optimal_solve_time = clock() - optimal_start
for num_clusters in range(2,21):
# Agglomerative
estimator = AgglomerativeClustering(n_clusters=num_clusters, affinity='euclidean', linkage='complete')
cluster_optimal_cost, R, hl_route, times = clustered_tsp_solve(points, num_clusters, estimator, basic=False)
clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time = times
Results[(file_path,num_clusters)] = ','.join(str(e) for e in [len(points), optimal_cost, optimal_solve_time, num_clusters, clustering_time, hl_cluster_solve_time,
ll_cluster_solve_time, total_cluster_solve_time, cluster_optimal_cost,
optimal_cost / cluster_optimal_cost, optimal_solve_time / total_cluster_solve_time])
print("CLUSTERING TSP SOLUTION({0},{7}) COST: {1}\n\tHIGH-LEVEL ROUTE: {2}\n\tTIME SPENT CLUSTERING: {3}\n\tTIME SPENT SOLVING LOW-LEVEL TSPS: {4}\n\tTIME SPENT SOLVING HIGH-LEVEL TSP: {5}\n\tTIME SPENT TOTAL: {6}\n".format(file_path, cluster_optimal_cost, hl_route, clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time, num_clusters))
print("Writing Output to CSV")
CSV = ['num_cities,optimal_cost,optimal_solve_time,num_clusters,clustering_time,high_level_cluster_solution_time,cluster_solve_time,cluster_total_time,cluster_optimal_cost,cluser_optimality,speedup']
for file_path in files:
for num_clusters in range(2,21):
CSV.append(Results[(file_path,num_clusters)])
with open('agglom_complete_results.csv','w+') as csv_file:
csv_file.write('\n'.join(CSV))
def test_agglomerative_ave_clustering():
Results = {}
files = ['tsps/berlin52.txt','tsps/bier127.txt','tsps/a280.txt','tsps/d493.txt',
'tsps/rat575.txt','tsps/d657.txt','tsps/u724.txt','tsps/vm1084.txt',
'tsps/d1291.txt','tsps/d1655.txt',]
for file_path in files:
points = points_from_file(file_path)
optimal_start = clock()
optimal_cost, _ = cp_tsp_solve(load_matrix(points), 0)
optimal_solve_time = clock() - optimal_start
for num_clusters in range(2,21):
# Agglomerative
estimator = AgglomerativeClustering(n_clusters=num_clusters, affinity='euclidean', linkage='average')
cluster_optimal_cost, R, hl_route, times = clustered_tsp_solve(points, num_clusters, estimator, basic=False)
clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time = times
Results[(file_path,num_clusters)] = ','.join(str(e) for e in [len(points), optimal_cost, optimal_solve_time, num_clusters, clustering_time, hl_cluster_solve_time,
ll_cluster_solve_time, total_cluster_solve_time, cluster_optimal_cost,
optimal_cost / cluster_optimal_cost, optimal_solve_time / total_cluster_solve_time])
print("CLUSTERING TSP SOLUTION({0},{7}) COST: {1}\n\tHIGH-LEVEL ROUTE: {2}\n\tTIME SPENT CLUSTERING: {3}\n\tTIME SPENT SOLVING LOW-LEVEL TSPS: {4}\n\tTIME SPENT SOLVING HIGH-LEVEL TSP: {5}\n\tTIME SPENT TOTAL: {6}\n".format(file_path, cluster_optimal_cost, hl_route, clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time, num_clusters))
print("Writing Output to CSV")
CSV = ['num_cities,optimal_cost,optimal_solve_time,num_clusters,clustering_time,high_level_cluster_solution_time,cluster_solve_time,cluster_total_time,cluster_optimal_cost,cluser_optimality,speedup']
for file_path in files:
for num_clusters in range(2,21):
CSV.append(Results[(file_path,num_clusters)])
with open('agglom_ave_results.csv','w+') as csv_file:
csv_file.write('\n'.join(CSV))
def main():
Results = {}
files = ['tsps/berlin52.txt','tsps/bier127.txt','tsps/a280.txt','tsps/d493.txt',
'tsps/rat575.txt','tsps/d657.txt','tsps/u724.txt','tsps/vm1084.txt',
'tsps/d1291.txt','tsps/d1655.txt',]
for file_path in files:
points = points_from_file(file_path)
optimal_start = clock()
optimal_cost, _ = cp_tsp_solve(load_matrix(points), 0)
optimal_solve_time = clock() - optimal_start
for num_clusters in range(2,21):
# DBSCAN clustering
# estimator = DBSCAN(eps=0.3, min_samples=10)
# Agglomerative
#estimator = AgglomerativeClustering(n_clusters=num_clusters, affinity='euclidean', linkage='ward')
#estimator = AgglomerativeClustering(n_clusters=num_clusters, affinity='euclidean', linkage='complete')
estimator = AgglomerativeClustering(n_clusters=num_clusters, affinity='euclidean', linkage='average')
cluster_optimal_cost, R, hl_route, times = clustered_tsp_solve(points, num_clusters, estimator, basic=False)
clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time = times
Results[(file_path,num_clusters)] = ','.join(str(e) for e in [len(points), optimal_cost, optimal_solve_time, num_clusters, clustering_time, hl_cluster_solve_time,
ll_cluster_solve_time, total_cluster_solve_time, cluster_optimal_cost,
optimal_cost / cluster_optimal_cost, optimal_solve_time / total_cluster_solve_time])
print("CLUSTERING TSP SOLUTION({0},{7}) COST: {1}\n\tHIGH-LEVEL ROUTE: {2}\n\tTIME SPENT CLUSTERING: {3}\n\tTIME SPENT SOLVING LOW-LEVEL TSPS: {4}\n\tTIME SPENT SOLVING HIGH-LEVEL TSP: {5}\n\tTIME SPENT TOTAL: {6}\n".format(file_path, cluster_optimal_cost, hl_route, clustering_time, ll_cluster_solve_time, hl_cluster_solve_time, total_cluster_solve_time, num_clusters))
print("Writing Output to CSV")
CSV = ['num_cities,optimal_cost,optimal_solve_time,num_clusters,clustering_time,high_level_cluster_solution_time,cluster_solve_time,cluster_total_time,cluster_optimal_cost,cluser_optimality,speedup']
for file_path in files:
for num_clusters in range(2,21):
CSV.append(Results[(file_path,num_clusters)])
with open('agglom_ave_results.csv','w+') as csv_file:
csv_file.write('\n'.join(CSV))
# files = ['tsps/berlin52.txt','tsps/bier127.txt','tsps/a280.txt','tsps/d493.txt',
# 'tsps/rat575.txt','tsps/d657.txt','tsps/u724.txt','tsps/vm1084.txt',
# 'tsps/d1291.txt','tsps/d1655.txt',]
# for file_path in files:
# for num_clusters in range(2,21):
# CSV.append(','.join([str(element) for element in cluster_tsp_vs_cp_tsp(file_path,num_clusters)]))
# with open('k_means_results.csv','w+') as csv_file:
# csv_file.write('\n'.join(CSV))
# CSV = ['point,X,Y,label']
# for file_path in files:
# for num_clusters in [2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]:
# csv_file = '\n'.join(CSV + cluster_test(file_path,num_clusters))
# with open('clusters/k_means_{0}_{1}.csv'.format(file_path[5:-4],num_clusters),'w+') as f:
# f.write(csv_file)
if __name__ == '__main__':
# main()
# test_depot_calculation()
test_birch_with_depot_calculation()
| gpl-3.0 |
colour-science/colour-spectroscope | colour_spectroscope/fraunhofer/plotting.py | 1 | 6417 | # -*- coding: utf-8 -*-
"""
Fraunhofer Lines
================
Defines the objects for the analysis of the *Fraunhofer* lines in images
captured with the homemade spectroscope.
References
==========
.. [1] http://en.wikipedia.org/wiki/Fraunhofer_lines
"""
from __future__ import division, unicode_literals
import bisect
import matplotlib.pyplot as plt
import numpy as np
import re
from colour.plotting import (COLOUR_STYLE_CONSTANTS, override_style, artist,
render)
from colour_spectroscope.fraunhofer import calibrated_RGB_spectrum
from colour_spectroscope.fraunhofer.analysis import luminance_sd
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'FRAUNHOFER_LINES_PUBLISHED', 'FRAUNHOFER_LINES_ELEMENTS_MAPPING',
'FRAUNHOFER_LINES_NOTABLE', 'FRAUNHOFER_LINES_CLUSTERED',
'FRAUNHOFER_LINES_MEASURED', 'fraunhofer_lines_plot'
]
FRAUNHOFER_LINES_PUBLISHED = {
'y': 898.765,
'Z': 822.696,
'A': 759.370,
'B': 686.719,
'C': 656.281,
'a': 627.661,
'D1': 589.592,
'D2': 588.995,
'D3': 587.5618,
'e (Hg)': 546.073,
'E2': 527.039,
'b1': 518.362,
'b2': 517.270,
'b3': 516.891,
'b4': 516.733,
'c': 495.761,
'F': 486.134,
'd': 466.814,
'e (Fe)': 438.355,
'G': 430.790,
'h': 410.175,
'H': 396.847,
'K': 393.368,
'L': 382.044,
'N': 358.121,
'P': 336.112,
'T': 302.108,
't': 299.444,
}
FRAUNHOFER_LINES_ELEMENTS_MAPPING = {
'y': 'O2',
'Z': 'O2',
'A': 'O2',
'B': 'O2',
'C': 'H Alpha',
'a': 'O2',
'D1': 'Na',
'D2': 'Na',
'D3': 'He',
'e (Hg)': 'Hg',
'E2': 'Fe',
'b1': 'Mg',
'b2': 'Mg',
'b3': 'Fe',
'b4': 'Mg',
'c': 'Fe',
'F': 'H Beta',
'd': 'Fe',
'e (Fe)': 'Fe',
'G"': 'H Gamma',
    'G': 'Fe, Ca',
'h': 'H Delta',
'H': 'Ca+',
'K': 'Ca+',
'L': 'Fe',
'N': 'Fe',
'P': 'Ti+',
'T': 'Fe',
't': 'Ni',
}
FRAUNHOFER_LINES_NOTABLE = (
'A',
'B',
'C',
'D1',
'D2',
'D3',
'E2',
'F',
'G',
'H',
'K',
)
FRAUNHOFER_LINES_CLUSTERED = {
'b[1-4]': ('b2', (
'b4',
'b3',
'b1',
), 'b\n4-1'),
'D[1-3]': ('D3', ('D2', 'D1'), 'D\n3-1')
}
FRAUNHOFER_LINES_MEASURED = {
'G': 134,
'F': 371,
'b4': 502,
'E2': 545,
'D1': 810,
'a': 974,
'C': 1095
}
@override_style()
def fraunhofer_lines_plot(image,
measured_Fraunhofer_lines,
show_luminance_spd=True,
**kwargs):
"""
    Plots the *Fraunhofer* lines of a given image.
Parameters
----------
image : unicode
Path to read the image from.
    measured_Fraunhofer_lines : dict
        Measured *Fraunhofer* lines locations.
    show_luminance_spd : bool, optional
        Whether to show the *Luminance* spectral distribution for the given
        image.
Other Parameters
----------------
\\**kwargs : dict, optional
{:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
Please refer to the documentation of the previously listed definitions.
Returns
-------
tuple
Current figure and axes.
"""
settings = {}
settings.update(kwargs)
figure, axes = artist(**settings)
spectrum = calibrated_RGB_spectrum(image, FRAUNHOFER_LINES_PUBLISHED,
measured_Fraunhofer_lines)
wavelengths = spectrum.wavelengths
input, output = wavelengths[0], wavelengths[-1]
width, height = figure.get_size_inches()
ratio = width / height
height = (output - input) * (1 / ratio)
axes.imshow(
COLOUR_STYLE_CONSTANTS.colour.colourspace.cctf_encoding(
np.clip(spectrum.values[np.newaxis, ...], 0, 1)),
extent=[input, output, 0, height])
sd = luminance_sd(spectrum).normalise(height - height * 0.05)
if show_luminance_spd:
axes.plot(sd.wavelengths, sd.values, color='black', linewidth=1)
fraunhofer_wavelengths = np.array(
sorted(FRAUNHOFER_LINES_PUBLISHED.values()))
fraunhofer_wavelengths = fraunhofer_wavelengths[np.where(
np.logical_and(fraunhofer_wavelengths >= input,
fraunhofer_wavelengths <= output))]
fraunhofer_lines_labels = [
tuple(FRAUNHOFER_LINES_PUBLISHED.keys())[tuple(
FRAUNHOFER_LINES_PUBLISHED.values()).index(i)]
for i in fraunhofer_wavelengths
]
y0, y1 = 0, height * .5
for i, label in enumerate(fraunhofer_lines_labels):
        # Trick to cluster sibling *Fraunhofer* lines.
from_siblings = False
for pattern, (first, siblings,
specific_label) in FRAUNHOFER_LINES_CLUSTERED.items():
if re.match(pattern, label):
if label in siblings:
from_siblings = True
label = specific_label
break
power = bisect.bisect_left(wavelengths, fraunhofer_wavelengths[i])
scale = (sd[wavelengths[power]] / height)
is_large_line = label in FRAUNHOFER_LINES_NOTABLE
axes.vlines(
fraunhofer_wavelengths[i],
y0,
y1 * scale,
linewidth=1 if is_large_line else 1)
axes.vlines(
fraunhofer_wavelengths[i],
y0,
height,
linewidth=1 if is_large_line else 1,
alpha=0.075)
if not from_siblings:
axes.text(
fraunhofer_wavelengths[i],
y1 * scale + (y1 * 0.025),
label,
clip_on=True,
ha='center',
va='bottom',
fontdict={'size': 'large' if is_large_line else 'small'})
r = lambda x: int(x / 100) * 100
plt.xticks(np.arange(r(input), r(output * 1.5), 20))
plt.yticks([])
settings = {
'title': 'The Solar Spectrum - Fraunhofer Lines',
'bounding_box': [input, output, 0, height],
'x_label': u'Wavelength λ (nm)',
'y_label': False,
}
settings.update(**kwargs)
return render(**settings)
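# Illustrative usage sketch (added for clarity, not part of the public API):
# ``image_path`` is a hypothetical path to a spectroscope capture, and
# FRAUNHOFER_LINES_MEASURED holds the calibration recorded for one particular
# image, so real uses should substitute their own measured line locations.
def _example_fraunhofer_lines_plot(image_path):
    return fraunhofer_lines_plot(
        image_path,
        FRAUNHOFER_LINES_MEASURED,
        show_luminance_spd=True)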
| bsd-3-clause |
lbybee/vc_network_learning_project | code/clean_data.py | 1 | 1166 | import process_data as prd
import pandas as pd
import sys
data = pd.read_pickle(sys.argv[1])
industry_data = pd.read_pickle(sys.argv[2])
ind_id = sys.argv[3]
data = prd.cleanData(data)
industry_data = prd.cleanIndustryData(industry_data)
data = prd.genCSuccess(data)
data = prd.joinIndData(data, industry_data)
data = prd.missingIndData(data)
data = prd.dropDuplicates(data)
data = prd.genNumInv(data)
data = prd.genNumIndInv(data, ind_id)
data = prd.dropSparceInv(data, int(sys.argv[4]))
data = prd.dropBadFirms(data)
data = prd.limitUS(data)
data = prd.dropUnfinished(data)
data = prd.genCoInvNum(data)
data = prd.genInvCoInvNum(data)
data = prd.genLocalIPO(data)
data = prd.genYear(data)
data = prd.genIndResults(data, ind_id)
data = prd.genIPOResults(data, ind_id)
data = prd.genIndExp(data, ind_id)
data = prd.genTotExp(data)
data = prd.genLagSuccess(data)
data = prd.genLagIndSuccess(data, ind_id)
data = prd.genYearDummy(data)
data = prd.genIndDummy(data, ind_id)
data = prd.genCumWinLoss(data, ind_id)
data = prd.genCumNetwork(data, ind_id)
data = prd.genCurNetwork(data)
data = prd.dropEarlyInv(data, int(sys.argv[5]), ind_id)
data.to_pickle(sys.argv[6])
| gpl-2.0 |
dfm/exopop | code/load_data.py | 1 | 3120 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module eases interaction with the datasets and monkey patches the import
path. It's "research code"... give me a break!
"""
from __future__ import division, print_function
import os
import sys
import h5py
import numpy as np
import pandas as pd
d = os.path.dirname
base = d(os.path.abspath(__file__))
sys.path.insert(0, base)
from population import SavedCensoringFunction
bp = os.path.join(d(base), "data")
# Hard-coded transit probability scaling.
P = 10.0
transit_lnprob0 = -2.98353340397
ln_period0 = np.log(P)
# Un-comment the following lines to recompute the transit probability if you
# get a list of Petigura's injections.
# G = 2945.4625385377644
# stars = pd.read_hdf(os.path.join(bp, "stlr.h5"), "stlr")
# transit_lnprobs = np.log(stars.Rstar) + (np.log(4*np.pi*np.pi)
# - np.log(G*P*P*stars.Mstar)) / 3
# transit_lnprob0 = np.median(transit_lnprobs)
def load_detection_efficiency():
"""
Load a pre-computed detection efficiency grid.
"""
with h5py.File(os.path.join(bp, "completeness.h5"), "r") as f:
bins = [f["ln_period_bin_edges"][...],
f["ln_radius_bin_edges"][...]]
lnprob = f["ln_detect_eff"][...]
lncompleteness = f["ln_completeness"][...]
return SavedCensoringFunction(bins, lnprob, lncompleteness)
def load_completenes_sim(rp_func=np.log, per_rng=np.log([5, 400]),
rp_rng=None, K=50000):
"""
This function will only work if you request Petigura's completeness
simulations from him and save them in the ``data`` directory.
"""
if rp_rng is None:
rp_rng = rp_func([0.5, 16])
sim = pd.read_hdf(os.path.join(bp, "mcDV.h5"), "mcDV")
m = sim.found * sim.bDV
x, y, z = np.log(sim.inj_P), rp_func(sim.inj_Rp), m
if rp_rng[1] > rp_func(16):
x = np.append(x, np.random.uniform(per_rng[0], per_rng[1], K))
y = np.append(y, np.random.uniform(rp_func(16.), rp_rng[1], K))
z = np.append(z, np.ones(K, dtype=bool))
return x, y, z
def load_candidates():
lines = open(os.path.join(bp, "table_ekoi836.tex")).readlines()
data = np.array([[l.split("&")[i] for i in (0, 2, 12, 13)] for l in lines
if l.split("&")[4].strip() == "P"],
dtype=float)
return (np.array(data[:, 0], dtype=int), data[:, 1:3],
np.vstack([np.zeros(len(data)), data[:, 3]]).T)
def load_petigura_bins(mylog=np.log):
ep_Rp_logbins = 0.5 * np.log10(2) * np.arange(9)
ep_Rp_lnbins = mylog(10 ** ep_Rp_logbins)
ep_Rp_values = np.array([12., 14.2, 18.6, 5.9, 1.9, 1.0, 0.9, 0.7])
norm = np.sum(ep_Rp_values * (ep_Rp_lnbins[1:] - ep_Rp_lnbins[:-1]))
ep_Rp_pdf = ep_Rp_values / norm
ep_p_logbins = np.log10([6.25, 12.5, 25, 50, 100])
ep_p_lnbins = mylog(10 ** ep_p_logbins)
ep_p_values = np.array([8.9, 13.7, 15.8, 15.2])
norm = np.sum(ep_p_values * (ep_p_lnbins[1:] - ep_p_lnbins[:-1]))
ep_p_pdf = ep_p_values / norm
return (ep_p_lnbins, ep_p_pdf), (ep_Rp_lnbins, ep_Rp_pdf)
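# Illustrative sketch (added for clarity, not called anywhere): pulls together
# the loaders above, assuming the data files described at the top of this
# module are present in the ../data directory.
def _example_load_everything():
    censoring = load_detection_efficiency()
    koi_ids, periods_radii, uncertainties = load_candidates()
    (p_bins, p_pdf), (rp_bins, rp_pdf) = load_petigura_bins()
    return censoring, koi_ids, periods_radii, uncertainties, \
        (p_bins, p_pdf), (rp_bins, rp_pdf)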
| mit |
alejospina/pydec | Examples/ResonantCavity/driver.py | 6 | 2164 | """
Solve the resonant cavity problem with Whitney forms.
References:
Douglas N. Arnold and Richard S. Falk and Ragnar Winther
"Finite element exterior calculus: from Hodge theory to numerical
stability"
Bull. Amer. Math. Soc. (N.S.), vol. 47, No. 2, pp. 281--354
DOI : 10.1090/S0273-0979-10-01278-4
"""
from pydec import simplicial_complex, d, delta, whitney_innerproduct, \
simplex_quivers
from numpy import loadtxt
from scipy import real, zeros
from scipy.linalg import eig
from matplotlib.pylab import quiver, figure, triplot, show
# Read in mesh data from files and construct complex
vertices = loadtxt('vertices.txt', dtype=float)
triangles = loadtxt('triangles.txt', dtype=int)
sc = simplicial_complex((vertices,triangles))
# Construct stiffness and mass matrices
K = sc[1].d.T * whitney_innerproduct(sc,2) * sc[1].d
M = whitney_innerproduct(sc,1)
# Eliminate Boundaries from matrices
boundary_edges = sc.boundary()
non_boundary_edges = set(sc[1].simplex_to_index.keys()) - set(boundary_edges)
non_boundary_indices = [sc[1].simplex_to_index[e] for e in non_boundary_edges]
# Eliminate boundary conditions
K = K[non_boundary_indices,:][:,non_boundary_indices]
M = M[non_boundary_indices,:][:,non_boundary_indices]
# Compute eigenvalues and eigenvectors
# (could use sparse eigenvalue solver instead)
eigenvalues, eigenvectors = eig(K.todense(), M.todense())
# Plot eigenvalues
NUM_EIGS = 50 # Number of eigenvalues to plot
values = sorted([x for x in real(eigenvalues) if x > 1e-10])[0:NUM_EIGS]
ax = figure().gca()
ax.set_title('First ' + str(len(values)) + ' Eigenvalues\n\n')
ax.hold(True)
ax.plot(values,'ko')
# Plot the eigenvector 1-cochain as a vector field
N = 2 # Which non-zero eigenvector to plot?
non_zero_values = real(eigenvectors[:,list(eigenvalues).index(values[N])])
all_values = zeros((sc[1].num_simplices,))
all_values[non_boundary_indices] = non_zero_values
bases, arrows = simplex_quivers(sc,all_values)
ax = figure().gca()
ax.set_title('Mode #' + str(N+1))
ax.quiver(bases[:,0],bases[:,1],arrows[:,0],arrows[:,1])
ax.triplot(sc.vertices[:,0], sc.vertices[:,1], sc.simplices)
ax.axis('equal')
show()
| bsd-3-clause |
russel1237/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
fengzhyuan/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.12/_downloads/plot_decoding_spatio_temporal_source.py | 5 | 5963 | """
==========================
Decoding source space data
==========================
Decoding, a.k.a MVPA or supervised machine learning applied to MEG
data in source space on the left cortical surface. Here f-test feature
selection is employed to confine the classification to the potentially
relevant features. The classifier then is trained to selected features of
epochs in source space.
"""
# Author: Denis A. Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import mne
import os
import numpy as np
from mne import io
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_fwd = data_path + 'MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = os.environ['SUBJECT'] = subjects_dir + '/sample'
os.environ['SUBJECTS_DIR'] = subjects_dir
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
label_names = 'Aud-rh', 'Vis-rh'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_r=2, vis_r=4) # load contra-lateral conditions
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Set up pick list: MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443'] # mark bads
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=True,
reject=dict(grad=4000e-13, eog=150e-6),
decim=5) # decimate to save memory and increase speed
epochs.equalize_event_counts(list(event_id.keys()), 'mintime', copy=False)
epochs_list = [epochs[k] for k in event_id]
# Compute inverse solution
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
n_times = len(epochs.times)
n_vertices = 3732
n_epochs = len(epochs.events)
# Load data and compute inverse solution and stcs for each epoch.
noise_cov = mne.read_cov(fname_cov)
inverse_operator = read_inverse_operator(fname_inv)
X = np.zeros([n_epochs, n_vertices, n_times])
# to save memory, we'll load and transform our epochs step by step.
for condition_count, ep in zip([0, n_epochs // 2], epochs_list):
stcs = apply_inverse_epochs(ep, inverse_operator, lambda2,
method, pick_ori="normal", # saves us memory
return_generator=True)
for jj, stc in enumerate(stcs):
X[condition_count + jj] = stc.lh_data
###############################################################################
# Decoding in sensor space using a linear SVM
# Make arrays X and y such that :
# X is 3d with X.shape[0] is the total number of epochs to classify
# y is filled with integers coding for the class to predict
# We must have X.shape[0] equal to y.shape[0]
# we know the first half belongs to the first class, the second one
y = np.repeat([0, 1], len(X) // 2)  # belongs to the second class
X = X.reshape(n_epochs, n_vertices * n_times)
# we have to normalize the data before supplying them to our classifier
X -= X.mean(axis=0)
X /= X.std(axis=0)
# prepare classifier
from sklearn.svm import SVC # noqa
from sklearn.cross_validation import ShuffleSplit # noqa
# Define a monte-carlo cross-validation generator (reduce variance):
n_splits = 10
clf = SVC(C=1, kernel='linear')
cv = ShuffleSplit(len(X), n_splits, test_size=0.2)
# setup feature selection and classification pipeline
from sklearn.feature_selection import SelectKBest, f_classif # noqa
from sklearn.pipeline import Pipeline # noqa
# we will use an ANOVA f-test to preselect relevant spatio-temporal units
feature_selection = SelectKBest(f_classif, k=500) # take the best 500
# to make life easier we will create a pipeline object
anova_svc = Pipeline([('anova', feature_selection), ('svc', clf)])
# initialize score and feature weights result arrays
scores = np.zeros(n_splits)
feature_weights = np.zeros([n_vertices, n_times])
# hold on, this may take a moment
for ii, (train, test) in enumerate(cv):
anova_svc.fit(X[train], y[train])
y_pred = anova_svc.predict(X[test])
y_test = y[test]
scores[ii] = np.sum(y_pred == y_test) / float(len(y_test))
feature_weights += feature_selection.inverse_transform(clf.coef_) \
.reshape(n_vertices, n_times)
print('Average prediction accuracy: %0.3f | standard deviation: %0.3f'
% (scores.mean(), scores.std()))
# prepare feature weights for visualization
feature_weights /= (ii + 1) # create average weights
# create mask to avoid division error
feature_weights = np.ma.masked_array(feature_weights, feature_weights == 0)
# normalize scores for visualization purposes
feature_weights /= feature_weights.std(axis=1)[:, None]
feature_weights -= feature_weights.mean(axis=1)[:, None]
# unmask, take absolute values, emulate f-value scale
feature_weights = np.abs(feature_weights.data) * 10
vertices = [stc.lh_vertno, np.array([], int)] # empty array for right hemi
stc_feat = mne.SourceEstimate(feature_weights, vertices=vertices,
tmin=stc.tmin, tstep=stc.tstep,
subject='sample')
brain = stc_feat.plot()
brain.set_time(100)
brain.show_view('l') # take the medial view to further explore visual areas
| bsd-3-clause |
tedunderwood/horizon | appendixA/countwordsinfeatures/parsefeaturejsons.py | 1 | 23956 | #!/usr/bin/env python3
# parsefeaturejsons.py
# classes and functions that can unpack the extracted feature files
# created by HTRC, and convert them into a .csv that is easier to
# manipulate
import csv, os, sys, bz2, random, json
from collections import Counter
import numpy as np
import pandas as pd
# import utils
currentdir = os.path.dirname(__file__)
libpath = os.path.join(currentdir, '../lib')
sys.path.append(libpath)
import SonicScrewdriver as utils
abspath = os.path.abspath(__file__)
thisdirectory = os.path.dirname(abspath)
namepath = os.path.join(thisdirectory, 'PersonalNames.txt')
placepath = os.path.join(thisdirectory, 'PlaceNames.txt')
romanpath = os.path.join(thisdirectory, 'RomanNumerals.txt')
with open(namepath, encoding = 'utf-8') as f:
personalnames = set([x.strip().lower() for x in f.readlines()])
with open(placepath, encoding = 'utf-8') as f:
placenames = set([x.strip().lower() for x in f.readlines()])
with open(romanpath, encoding = 'utf-8') as f:
romannumerals = set([x.strip().lower() for x in f.readlines()])
daysoftheweek = {'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'}
monthsoftheyear = {'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december'}
# This is a little bit of a cheat, because it means we're not inferring everything
# empirically from evidence, but this is the product of a lot of previous experience,
# and hard-coding it here makes it possible to do a nice normalization at the page
# level.
ficwords = {'me', 'my', 'i', 'you', 'your', 'she', 'her', 'hers', 'he', 'him', 'his', 'the', 'said'}
def normalize_token(token):
''' Normalizes a token by lowercasing it and by bundling
certain categories together. The lists of personal and place names
are never going to be all-inclusive; you have to be aware of that,
and deactivate this in corpora where it could pose a problem.
'''
global personalnames, placenames, daysoftheweek, monthsoftheyear
token = token.lower()
if len(token) < 1:
return token
elif token[0].isdigit() and token[-1].isdigit():
return "#arabicnumeral"
elif token in daysoftheweek:
return "#dayoftheweek"
elif token in monthsoftheyear:
return "#monthoftheyear"
elif token in personalnames:
return "#personalname"
elif token in placenames:
return "#placename"
else:
return token
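# Small illustrative sketch (added for clarity, not called anywhere): which
# bucket a token lands in depends on the name and place lists loaded above,
# so the sample words here are assumptions rather than guaranteed entries in
# those files.
def _normalize_token_examples():
    samples = ['1844', 'Tuesday', 'January', 'London', 'Whale']
    # e.g. '1844' -> '#arabicnumeral', 'Tuesday' -> '#dayoftheweek',
    # 'Whale' -> 'whale'; 'London' maps to '#placename' only if it appears
    # in PlaceNames.txt
    return {s: normalize_token(s) for s in samples}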
def normalize_token_for_page(token):
''' Normalizes a token by lowercasing it and by bundling
certain categories together. Differs from the previous
function in adding roman numerals.
'''
global personalnames, placenames, daysoftheweek, monthsoftheyear, romannumerals
if token == "I":
return token.lower()
# uppercase I is not usually a roman numeral!
token = token.lower()
if len(token) < 1:
return token
elif token[0].isdigit() and token[-1].isdigit():
return "#arabicnumeral"
elif token in daysoftheweek:
return "#dayoftheweek"
elif token in monthsoftheyear:
return "#monthoftheyear"
elif token in personalnames:
return "#personalname"
elif token in placenames:
return "#placename"
elif token in romannumerals:
return "#romannumeral"
else:
return token
class VolumeFromJson:
# Mainly a data object that contains page-level wordcounts
# for a volume.
# Has been expanded in Jan 2017 by adding the default argument
# pagestoinclude
def __init__(self, volumepath, volumeid, pagestoinclude = set()):
'''Initializes a LoadedVolume by reading wordcounts from
a json file. By default it reads all the pages. But if
a set of pagestoinclude is passed in, it will read only page numbers
belonging to that set.'''
if volumepath.endswith('bz2'):
with bz2.open(volumepath, mode = 'rt', encoding = 'utf-8') as f:
thestring = f.read()
else:
with open(volumepath, encoding = 'utf-8') as f:
thestring = f.read()
thejson = json.loads(thestring)
self.volumeid = thejson['id']
pagedata = thejson['features']['pages']
self.numpages = len(pagedata)
self.pagecounts = []
self.totalcounts = Counter()
self.totaltokens = 0
self.bodytokens = 0
self.sentencecount = 0
self.linecount = 0
typetokenratios = []
chunktokens = 0
typesinthischunk = set()
# a set of types in the current 10k-word chunk; progress
# toward which is tracked by chunktokens
self.integerless_pages = 0
self.out_of_order_pages = 0
self.skipped_pages = 0
compromise_pg = 0
if len(pagestoinclude) < 1:
pagestoinclude = set([x+1 for x in range(self.numpages)])
# If an empty set was passed in, or no set was provided,
# include all pages. the x+1 is because pages start counting
# at one, not zero.
for i in range(self.numpages):
thispagecounts = Counter()
thisbodytokens = 0
thisheadertokens = 0
thispage = pagedata[i]
# There are really two ways of numbering pages. They come in an order,
# which gives them an inherent ordinality (this is the *first* page). But
# they also have cardinal *labels* attached, in the "seq" field. These labels
# are usually, but not necessarily, convertible to integers. (Usually "00000001",
# but could be "notes.") *Usually* they are == to the ordinal number,
# but again, not necessarily.
# In this loop, i is the ordinal page number, and cardinal_page is the cardinal
# label; its value will be -1 if it can't be converted to an integer.
# compromise_pg skips pages that have no integer seq, but otherwise
# proceeds ordinally
try:
cardinal_page = int(thispage['seq'])
except:
cardinal_page = -1
if cardinal_page > 0:
compromise_pg += 1
elif cardinal_page < 0:
self.integerless_pages += 1
if compromise_pg != cardinal_page:
self.out_of_order_pages += 1
if cardinal_page >= 0 and compromise_pg in pagestoinclude:
linesonpage = int(thispage['lineCount'])
sentencesonpage = int(thispage['body']['sentenceCount'])
self.sentencecount += sentencesonpage
self.linecount += linesonpage
# I could look for sentences in the header or footer, but I think
# that would overvalue accidents of punctuation.
bodywords = thispage['body']['tokenPosCount']
for token, partsofspeech in bodywords.items():
lowertoken = token.lower()
typesinthischunk.add(lowertoken)
                    # we do that to keep track of types -- notably, before normalizing
normaltoken = normalize_token(lowertoken)
for part, count in partsofspeech.items():
thisbodytokens += count
chunktokens += count
thispagecounts[normaltoken] += count
if chunktokens > 10000:
typetoken = len(typesinthischunk) / chunktokens
typetokenratios.append(typetoken)
typesinthischunk = set()
chunktokens = 0
# generally speaking we count typetoken ratios on 10000-word chunks
headerwords = thispage['header']['tokenPosCount']
for token, partsofspeech in headerwords.items():
lowertoken = token.lower()
normaltoken = "#header" + normalize_token(lowertoken)
for part, count in partsofspeech.items():
thisheadertokens += count
thispagecounts[normaltoken] += count
# You will notice that I treat footers (mostly) as part of the body
# Footers are rare, and rarely interesting.
footerwords = thispage['footer']['tokenPosCount']
for token, partsofspeech in footerwords.items():
lowertoken = token.lower()
typesinthischunk.add(lowertoken)
                    # we do that to keep track of types -- notably before normalizing
normaltoken = normalize_token(lowertoken)
for part, count in partsofspeech.items():
thisbodytokens += count
chunktokens += count
thispagecounts[normaltoken] += count
self.pagecounts.append(thispagecounts)
for key, value in thispagecounts.items():
self.totalcounts[key] += value
self.totaltokens += thisbodytokens
self.totaltokens += thisheadertokens
self.bodytokens += thisbodytokens
else:
# print(i, cardinal_page, compromise_pg)
self.skipped_pages += 1
if len(typetokenratios) < 1 or chunktokens > 5000:
# After all pages are counted, we may be left with a
# chunk of fewer than 10000 words that we could use as further
# evidence about typetoken ratios.
# We do this only if we have to, or if the chunk is large
# enough to make it reasonable evidence.
chunktokens = chunktokens + 1 # Laplacian correction aka kludge
typetoken = len(typesinthischunk) / chunktokens
predictedtt = 4.549e-01 - (5.294e-05 * chunktokens) + (2.987e-09 * pow(chunktokens, 2))
# That's an empirical quadratic regression on observed data from many genres
extrapolatedtt = 0.2242 * (typetoken / predictedtt)
# We infer what typetoken *would* be for a 10k word chunk of this vol, given that it's
# typetoken for an n-word chunk.
if extrapolatedtt > 0.6:
extrapolatedtt = 0.6
if extrapolatedtt < 0.1:
extrapolatedtt = 0.1
# Let's be realistic. We have some priors on the bounds.
typetokenratios.append(extrapolatedtt)
self.typetoken = sum(typetokenratios) / len(typetokenratios)
self.sentencelength = self.bodytokens / (self.sentencecount + 1)
self.linelength = self.totaltokens / self.linecount
# We are done with the __init__ method for this volume.
# When I get a better feature sample, we'll add some information about initial
# capitalization.
def write_volume_features(self, outpath, override = False, translator = dict()):
''' This writes volume features while normalizing word frequencies,
after using a translation table to, for instance, convert American spellings
to British.
'''
if os.path.isfile(outpath) and not override:
print('Error: you are asking me to override an existing')
print('file without explicitly specifying to do so in your')
print('invocation of write_volume_features.')
for word, equivalent in translator.items():
if word in self.totalcounts:
self.totalcounts[equivalent] += self.totalcounts.pop(word)
with open(outpath, mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
writer.writerow(['feature', 'count'])
for key, value in self.totalcounts.items():
if value > 0:
writer.writerow([key, value / self.totaltokens])
writer.writerow(['#sentencelength', self.sentencelength])
writer.writerow(['#typetoken', self.typetoken])
writer.writerow(['#linelength', self.linelength])
def get_raw_body_features(self):
'''
Return features sans normalization.
'''
outdict = Counter()
for key, value in self.totalcounts.items():
if not key.startswith('#header'):
outdict[key] = value
outdict['#sentencelength'] = self.sentencelength
outdict['#typetoken'] = self.typetoken
outdict['#linelength'] = self.linelength
return outdict, self.bodytokens
def get_volume_features(self):
'''
Just like write_volume_features, except we return them
as a dictionary.
'''
outdict = Counter()
if self.totaltokens < 1:
return outdict, 0
else:
for key, value in self.totalcounts.items():
outdict[key] = value / self.totaltokens
outdict['#sentencelength'] = self.sentencelength
outdict['#typetoken'] = self.typetoken
outdict['#linelength'] = self.linelength
return outdict, self.totaltokens
def append_volume_features(self, outpath):
''' This is probably the way to do it. Initialize the file with
a header, and then add a bunch of volumes to the same file,
incorporating a column that distinguishes them by docid.
'''
with open(outpath, mode = 'a', encoding = 'utf-8') as f:
writer = csv.writer(f)
for key, value in self.totalcounts.items():
writer.writerow([self.volumeid, key, value / self.totaltokens])
writer.writerow([self.volumeid, '#sentencelength', self.sentencelength])
writer.writerow([self.volumeid, '#typetoken', self.typetoken])
writer.writerow([self.volumeid, '#linelength', self.linelength])
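# Illustrative usage sketch (added for clarity, not called anywhere): the path
# and volume id below are placeholders, and pagestoinclude is optional -- an
# empty set means every page gets counted.
def _example_volume_features(jsonpath='data/mdp.39015012345678.json.bz2',
                             docid='mdp.39015012345678'):
    vol = VolumeFromJson(jsonpath, docid)
    features, totaltokens = vol.get_volume_features()
    return features, totaltokens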
def log_tokens_for_page(pagejson, pagedict, typesonpage, ficcount, headerflag):
'''
Takes data from the pagejson and logs it appropriately in pagedict
and typesonpage.
'''
global ficwords
for token, partsofspeech in pagejson.items():
if token.istitle():
titleflag = True
else:
titleflag = False
if token.isupper():
upperflag = True
else:
upperflag = False
lowertoken = token.lower()
typesonpage.add(lowertoken)
# we do that to keep track of types -- notably, before normalizing
normaltoken = normalize_token_for_page(token)
# normalizing also lowercases the token, but we don't
# want to *send in* a lowercased token
for part, count in partsofspeech.items():
if headerflag:
pagedict['headertokens'] += count
else:
pagedict['bodytokens'] += count
if upperflag:
pagedict['uppercase'] += count
if titleflag:
pagedict['titlecase'] += count
if lowertoken in ficwords:
ficcount += count
pagedict['tokens'][normaltoken] += count
return ficcount
class PagelistFromJson:
# A data object that contains page-level wordcounts
# for a volume,
def __init__(self, volumepath, volumeid):
'''initializes a LoadedVolume by reading wordcounts from
a json file'''
if volumepath.endswith('bz2'):
with bz2.open(volumepath, mode = 'rt', encoding = 'utf-8') as f:
thestring = f.read()
else:
with open(volumepath, encoding = 'utf-8') as f:
thestring = f.read()
thejson = json.loads(thestring)
assert thejson['id'] == volumeid
# I require volumeid to be explicitly passed in,
# although I could infer it, because I don't want
#any surprises.
self.volumeid = thejson['id']
pagejsons = thejson['features']['pages']
self.numpages = len(pagejsons)
self.pages = []
self.features = []
# in this data structure, a volume is a list of pages
for i in range(self.numpages):
pagedata = dict()
# each page is a dictionary that contains categories of
# features, most obviously wordcounts:
pagedata['tokens'] = Counter()
pagedata['bodytokens'] = 0
pagedata['titlecase'] = 0
pagedata['uppercase'] = 0
pagedata['headertokens'] = 0
self.pages.append(pagedata)
for i in range(self.numpages):
pagedata = self.pages[i]
thispage = pagejsons[i]
typesonpage = set()
ficcount = 0
pagedata['lines'] = int(thispage['lineCount'])
pagedata['sentences'] = int(thispage['body']['sentenceCount'])
# I could look for sentences in the header or footer, but I think
# that would overvalue accidents of punctuation.
bodywords = thispage['body']['tokenPosCount']
ficcount = log_tokens_for_page(bodywords, pagedata, typesonpage, ficcount, headerflag = False)
headerwords = thispage['header']['tokenPosCount']
ficcount = log_tokens_for_page(headerwords, pagedata, typesonpage, ficcount, headerflag = True)
footerwords = thispage['footer']['tokenPosCount']
ficcount = log_tokens_for_page(footerwords, pagedata, typesonpage, ficcount, headerflag = True)
pagefeatures = dict()
# We don't directly return token counts, but normalize them
# in various ways
totaltokens = pagedata['bodytokens'] + pagedata['headertokens']
if totaltokens > 0:
for key, value in pagedata['tokens'].items():
pagefeatures[key] = value / totaltokens
pagefeatures['#totaltokens'] = totaltokens
if totaltokens > 0:
pagefeatures['#typetoken'] = len(typesonpage) / totaltokens
else:
pagefeatures['#typetoken'] = 1
pagefeatures['#absfromedge'] = min(i, self.numpages - i)
pagefeatures['#pctfromedge'] = pagefeatures['#absfromedge'] / self.numpages
pagefeatures['#absupper'] = pagedata['uppercase']
if totaltokens > 0:
pagefeatures['#pctupper'] = pagedata['uppercase'] / totaltokens
else:
pagefeatures['#pctupper'] = 0.5
pagefeatures['#abstitle'] = pagedata['titlecase']
if totaltokens > 0:
pagefeatures['#pcttitle'] = pagedata['titlecase'] / totaltokens
else:
pagefeatures['#pcttitle'] = 0.5
if pagedata['lines'] > 0:
pagefeatures['#linelength'] = totaltokens / pagedata['lines']
else:
pagefeatures['#linelength'] = 10
if totaltokens > 0:
pagefeatures['#ficpct'] = ficcount / totaltokens
else:
pagefeatures['#ficpct'] = 0
self.features.append(pagefeatures)
# Some features also get recorded as Z values normalized by the mean and
# standard deviation for this volume.
tonormalize = ['#typetoken', '#pcttitle', '#linelength', '#totaltokens', '#ficpct']
for feature in tonormalize:
values = np.zeros(self.numpages)
for i in range(self.numpages):
pagefeatures = self.features[i]
values[i] = (pagefeatures[feature])
meanval = np.mean(values)
stdval = np.std(values) + .0001
normalizedfeature = feature + 'normed'
for i in range(self.numpages):
self.features[i][normalizedfeature] = (self.features[i][feature] - meanval) / stdval
# We are done with the __init__ method for this volume.
# When I get a better feature sample, we'll add some information about initial
# capitalization.
def get_feature_list(self):
'''
Returns a list where each page is represented as a dictionary of features.
Features should already be normalized in all the ways we're going to
normalize them.
'''
return self.features
class LiteralVolumeFromJson:
# Mainly a data object that contains page-level wordcounts
# for a volume.
def __init__(self, volumepath, volumeid):
'''initializes a LoadedVolume by reading wordcounts from
a json file'''
if volumepath.endswith('bz2'):
with bz2.open(volumepath, mode = 'rt', encoding = 'utf-8') as f:
thestring = f.read()
else:
with open(volumepath, encoding = 'utf-8') as f:
thestring = f.read()
thejson = json.loads(thestring)
assert thejson['id'] == volumeid
# I require volumeid to be explicitly passed in,
# although I could infer it, because I don't want
#any surprises.
self.volumeid = thejson['id']
pagedata = thejson['features']['pages']
self.numpages = len(pagedata)
self.pagecounts = []
self.totalcounts = Counter()
self.totaltokens = 0
for i in range(self.numpages):
thispagecounts = Counter()
thisbodytokens = 0
thisheadertokens = 0
thispage = pagedata[i]
linesonpage = int(thispage['lineCount'])
sentencesonpage = int(thispage['body']['sentenceCount'])
# I could look for sentences in the header or footer, but I think
# that would overvalue accidents of punctuation.
bodywords = thispage['body']['tokenPosCount']
for normaltoken, partsofspeech in bodywords.items():
for part, count in partsofspeech.items():
thisbodytokens += count
thispagecounts[normaltoken] += count
self.pagecounts.append(thispagecounts)
for key, value in thispagecounts.items():
self.totalcounts[key] += value
self.totaltokens += thisbodytokens
# We are done with the __init__ method for this volume.
# When I get a better feature sample, we'll add some information about initial
# capitalization.
def write_volume_features(self, outpath, override = False):
if os.path.isfile(outpath) and not override:
print('Error: you are asking me to override an existing')
print('file without explicitly specifying to do so in your')
            print('invocation of write_volume_features.')
            return
with open(outpath, mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
writer.writerow(['feature', 'count'])
for key, value in self.totalcounts.items():
writer.writerow([key, value / self.totaltokens])
writer.writerow(['#sentencelength', self.sentencelength])
writer.writerow(['#typetoken', self.typetoken])
writer.writerow(['#linelength', self.linelength])
def get_volume_features(self):
'''
Just like write_volume_features, except we return them
as a dictionary.
'''
if self.totaltokens < 1:
return Counter(), 0
else:
return self.totalcounts, self.totaltokens
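# --- Hedged illustration (added; not part of the original module) -----------
# The per-volume z-scoring performed in PagelistFromJson.__init__ for the
# features listed in `tonormalize` boils down to the standalone helper below.
# It assumes a list of per-page feature dicts shaped like self.features and
# is only a minimal sketch.
def _zscore_page_feature_sketch(pagefeatures, featurename):
    '''Return per-page z-scores of one feature across a volume.'''
    values = np.zeros(len(pagefeatures))
    for i, page in enumerate(pagefeatures):
        values[i] = page[featurename]
    meanval = np.mean(values)
    stdval = np.std(values) + .0001    # same epsilon used in the class
    return (values - meanval) / stdval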
if __name__ == "__main__":
meta = pd.read_csv('/Users/tunder/Dropbox/python/train20/bzipmeta.csv', dtype = 'object', index_col = 'docid')
for index, row in meta.iterrows():
inpath = row['filepath']
vol = VolumeFromJson(inpath, index)
outpath = '/Volumes/TARDIS/work/train20/' + utils.clean_pairtree(index) + '.csv'
vol.write_volume_features(outpath, override = True)
| mit |
scpeters/benchmark | plot_helpers.py | 1 | 8441 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams.update({'font.size': 16})
from csv_dictionary import *
boxes = makeCsvDictOfArrays('test_results/BENCHMARK_boxes_dt.csv')
color1 = [0, 0, 0.5]
color2 = [0.5, 0.5, 0.5]
linestyle1 = '-'
linestyle2 = '--'
def plotTimePosition3(time, position3):
plt.gcf()
plt.plot(time, position3[:,0], linewidth=4.0, linestyle=linestyle1, color=color1)
plt.plot(time, position3[:,1], linewidth=4.0, linestyle=linestyle2, color=color1)
plt.plot(time, position3[:,2], linewidth=2.0, linestyle=linestyle1, color=color2)
plt.xlabel('Time (s)')
plt.ylabel('Position (m)')
plt.grid()
plt.legend(['x','y','z'], loc='best');
# helper function for resizing axes
def vector_scale(x, scale):
mean = np.mean(x)
centered = x - mean
return mean + centered*scale
def vector_log10_scale(x, scale):
logx = np.log10(x)
scaled = vector_scale(logx, scale)
return [10**l for l in scaled]
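# Hedged example (added for illustration; not in the original module):
# vector_scale widens an interval about its mean, and vector_log10_scale does
# the same in log10 space -- this is how the plotting code below pads axis
# limits.  A quick, self-contained sanity check:
def _axis_scale_example():
    lo, hi = vector_scale(np.array([2.0, 4.0]), 1.5)
    # -> (1.5, 4.5): same midpoint 3.0, interval 1.5x wider
    log_lo, log_hi = vector_log10_scale(np.array([1.0, 100.0]), 1.5)
    # -> (~0.32, ~316): widened symmetrically in log10 space
    return (lo, hi), (log_lo, log_hi)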
# Create a plot with time step Dt on horizontal axis
# Value of `yname` plotted on vertical axis
def plotEnginesDt(params, yname
, axscale=1.1
, ayscale=1.1
, csvDict=boxes
, legend='best'
, xname='dt'
, xlabel='Time step (s)'
, ylabel='Error'
, xlim=[]
, ylim=[]
, xscale='linear'
, yscale='linear'
, title='title'
, skipDart=False
):
engines = {}
engines['bullet'] = ['$B$', 'b--']
if not skipDart:
engines['dart'] = ['$d$', 'g--']
engines['ode'] = ['$O$', 'r--']
engines['simbody'] = ['$S$', 'k--']
fig = plt.figure()
xdata = {}
ydata = {}
for e in sorted(engines.keys()):
params['engine'] = e
ii = np.array(list(query(csvDict, params)))
xdata[e] = csvDict[xname][ii]
ydata[e] = csvDict[yname][ii]
color = engines[e][1][0]
plt.plot(xdata[e]
, ydata[e]+np.finfo(float).eps
, engines[e][1]
, mfc=color
, marker=engines[e][0]
, markersize=20.0
, markeredgecolor=color
, linewidth=2.0
)
plt.grid()
plt.xlabel(xlabel, fontsize=18)
plt.ylabel(ylabel, fontsize=18)
plt.gca().set_xscale(xscale)
plt.gca().set_yscale(yscale)
plt.title(title)
plt.gcf().set_size_inches(10, 6)
if len(xlim) == 2:
plt.xlim(xlim)
elif xscale == 'log':
plt.xlim(vector_log10_scale(plt.xlim(), axscale))
else:
plt.xlim(vector_scale(plt.xlim(), axscale))
if len(ylim) == 2:
plt.ylim(ylim)
elif yscale == 'log':
plt.ylim(vector_log10_scale(plt.ylim(), ayscale))
else:
plt.ylim(vector_scale(plt.ylim(), ayscale))
plt.legend(sorted(engines.keys()), loc=legend)
plt.show();
# some extra info about each plot
xdata_minmax = {}
ydata_minmax = {}
for e in sorted(engines.keys()):
xdata_minmax[e] = [min(xdata[e]), max(xdata[e])]
ydata_minmax[e] = [min(ydata[e]), max(ydata[e])]
def plotEnginesTime(params, yname
, csvDict=boxes
, legend='best'
, skipDart=False
, xname='timeRatio'
, xlabel='Time ratio (real / sim)'
, ylabel='Error'
, xlim=[]
, ylim=[]
, xscale='linear'
, yscale='linear'
, title='title'
):
plotEnginesDt(params, yname
, csvDict=csvDict
, legend=legend
, skipDart=skipDart
, xname=xname
, xlabel=xlabel
, ylabel=ylabel
, xlim=xlim
, ylim=ylim
, xscale=xscale
, yscale=yscale
, title=title
)
def plotEnginesModelCount(params, yname
, csvDict=boxes
, legend='best'
, skipDart=False
, xname='modelCount'
, xlabel='Model count'
, ylabel='Time ratio (real / sim)'
, xlim=[]
, ylim=[]
, xscale='linear'
, yscale='linear'
, title='title'
):
plotEnginesDt(params, yname
, csvDict=csvDict
, legend=legend
, skipDart=skipDart
, xname=xname
, xlabel=xlabel
, ylabel=ylabel
, xlim=xlim
, ylim=ylim
, xscale=xscale
, yscale=yscale
, title=title
)
def plot3TimeDt(params
, csvDict=boxes
, yname='linPositionErr_maxAbs'
, title=''
, skipDart=False
, xscale='linear'
, yscale='linear'
):
plotEnginesDt(params
, csvDict=csvDict
, yname=yname
, title=title
, skipDart=skipDart
, xscale=xscale
, yscale=yscale
)
plotEnginesDt(params
, csvDict=csvDict
, yname='timeRatio'
, ylabel='Computational time / sim time'
, title='Computational time'
, skipDart=skipDart
, xscale=xscale
, yscale=yscale
)
plotEnginesTime(params
, csvDict=csvDict
, yname=yname
, title=title
, skipDart=skipDart
, xscale=xscale
, yscale=yscale
)
def plotErrorDt(classname, title_prefix
, csvDict=boxes
, legend='best'
, xscale='linear'
, yscale='linear'):
p = {}
p['classname'] = classname
title_prefix = title_prefix
plotEnginesDt(p, yname='linPositionErr_maxAbs', title=title_prefix + 'position'
, csvDict=csvDict, legend=legend, xscale=xscale, yscale=yscale)
plotEnginesDt(p, yname='angPositionErr_mag_maxAbs', title=title_prefix + 'angle'
, csvDict=csvDict, legend=legend, yscale=yscale)
plotEnginesDt(p, yname='linVelocityErr_maxAbs', title=title_prefix + 'velocity'
, csvDict=csvDict, legend=legend, yscale=yscale)
plotEnginesDt(p, yname='angMomentumErr_maxAbs', title=title_prefix + 'angular momentum'
, csvDict=csvDict, legend=legend, yscale=yscale)
plotEnginesDt(p, yname='energyError_maxAbs', title=title_prefix + 'energy'
, csvDict=csvDict, legend=legend, yscale=yscale)
def plotTimeDt(classname, title_prefix
, csvDict=boxes
, legend='best'
, yscale='linear'):
p = {}
p['classname'] = classname
title_prefix = title_prefix
plotEnginesDt(p, yname='timeRatio', title=title_prefix + 'time ratio'
, ylabel='Time ratio (real / sim)'
, csvDict=csvDict, legend=legend, yscale=yscale)
def plotErrorTime(classname, title_prefix
, csvDict=boxes
, legend='best'
, yscale='linear'):
p = {}
p['classname'] = classname
title_prefix = title_prefix
plotEnginesTime(p, yname='linPositionErr_maxAbs', title=title_prefix + 'position'
, csvDict=csvDict, legend=legend, yscale=yscale)
plotEnginesTime(p, yname='angPositionErr_mag_maxAbs', title=title_prefix + 'angle'
, csvDict=csvDict, legend=legend, yscale=yscale)
plotEnginesTime(p, yname='linVelocityErr_maxAbs', title=title_prefix + 'velocity'
, csvDict=csvDict, legend=legend, yscale=yscale)
plotEnginesTime(p, yname='angMomentumErr_maxAbs', title=title_prefix + 'angular momentum'
, csvDict=csvDict, legend=legend, yscale=yscale)
plotEnginesTime(p, yname='energyError_maxAbs', title=title_prefix + 'energy'
, csvDict=csvDict, legend=legend, yscale=yscale)
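# Hedged usage sketch (added; not in the original module).  The classname
# 'BoxesTest' below is hypothetical -- the real value must match a
# 'classname' entry in the benchmark CSV loaded at the top of this file.
def _example_usage():
    plotErrorDt('BoxesTest', 'Boxes: ', csvDict=boxes, yscale='log')
    plotTimeDt('BoxesTest', 'Boxes: ', csvDict=boxes, yscale='log')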
| apache-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_inst/padova_inst_6/Rest.py | 33 | 7215 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
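# ------------------------------------------------------------------------------------------------------
# Hedged aside (added; not part of the original script): the contour panels
# built in add_sub_plot rely on scipy's radial basis function interpolator.
# A minimal standalone sketch of the same call on toy data:
def _rbf_interpolation_sketch():
    xs = asarray([0., 1., 0., 1.])
    ys = asarray([0., 0., 1., 1.])
    zs = asarray([0., 1., 1., 2.])          # values at the four corners
    rbf = scipy.interpolate.Rbf(xs, ys, zs, function='linear')
    return rbf(0.5, 0.5)                    # interpolated value at the centre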
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [3,4,15,22,37,53,54,55,57,62,77,88,89,90,92,93]
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Rest of the Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Rest.pdf')
plt.clf()
| gpl-2.0 |
mihail911/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/geo.py | 69 | 19738 | import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return u"%0.0f\u00b0" % degrees
RESOLUTION = 75
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, long, lat):
'return a format string formatting the coordinate'
long = long * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if long >= 0.0:
ew = 'E'
else:
ew = 'W'
return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
        self._longitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
        Set the number of degrees between each latitude grid.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
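# Hedged illustration (added; not part of matplotlib): GeoAxes.ThetaFormatter
# turns tick positions given in radians into degree labels, e.g. pi/2 becomes
# u"90\u00b0".  A minimal check, assuming rcParams['text.usetex'] is False:
def _theta_formatter_example():
    formatter = GeoAxes.ThetaFormatter(round_to=15.0)
    return formatter(np.pi / 2.0)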
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array, or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * np.sin(half_long)) / sinc_alpha
y = (np.sin(latitude) / sinc_alpha)
x.set_fill_value(0.0)
y.set_fill_value(0.0)
return np.concatenate((x.filled(), y.filled()), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
aux = 2.0 * np.arcsin((2.0 * latitude) / np.pi)
x = (2.0 * np.sqrt(2.0) * longitude * np.cos(aux)) / np.pi
y = (np.sqrt(2.0) * np.sin(aux))
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
long = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((long, lat), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
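# Hedged usage sketch (added; not part of this module).  In matplotlib these
# Axes subclasses are registered with the projection machinery and selected
# by name; something along these lines, assuming the standard registration
# performed in matplotlib.projections:
def _projection_usage_sketch():
    from matplotlib.projections import register_projection
    import matplotlib.pyplot as plt
    register_projection(HammerAxes)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='hammer')
    return ax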
| gpl-3.0 |
cwu2011/scikit-learn | sklearn/linear_model/setup.py | 169 | 1567 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
WarrenWeckesser/numpy | doc/neps/conf.py | 6 | 7313 | # -*- coding: utf-8 -*-
#
# NumPy Enhancement Proposals documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 11 12:45:09 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.imgmath',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../source/_templates/']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NumPy Enhancement Proposals'
copyright = u'2017-2018, NumPy Developers'
author = u'NumPy Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u''
# The full version, including alpha/beta/rc tags.
release = u''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
## -- Options for HTML output ----------------------------------------------
#
## The theme to use for HTML and HTML Help pages. See the documentation for
## a list of builtin themes.
##
#html_theme = 'alabaster'
#
## Theme options are theme-specific and customize the look and feel of a theme
## further. For a list of options available for each theme, see the
## documentation.
##
## html_theme_options = {}
#
## Add any paths that contain custom static files (such as style sheets) here,
## relative to this directory. They are copied after the builtin static files,
## so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
#
## Custom sidebar templates, must be a dictionary that maps document names
## to template names.
##
## This is required for the alabaster theme
## refs: https://alabaster.readthedocs.io/en/latest/installation.html#sidebars
#html_sidebars = {
# '**': [
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# ]
#}
## -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if not os.path.isdir(themedir):
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init && git submodule update")
html_theme = 'scipy'
html_theme_path = [themedir]
#if 'scipyorg' in tags:
if True:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("https://scipy.org/", "Scipy.org"),
("https://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_sidebars = {'index': 'indexsidebar.html'}
#html_additional_pages = {
# 'index': 'indexcontent.html',
#}
html_title = "%s" % (project)
html_static_path = ['../source/_static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
htmlhelp_basename = 'numpy'
if 'sphinx.ext.pngmath' in extensions:
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
plot_html_show_formats = False
plot_html_show_source_link = False
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NumPyEnhancementProposalsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NumPyEnhancementProposals.tex', u'NumPy Enhancement Proposals Documentation',
u'NumPy Developers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'numpyenhancementproposals', u'NumPy Enhancement Proposals Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NumPyEnhancementProposals', u'NumPy Enhancement Proposals Documentation',
author, 'NumPyEnhancementProposals', 'One line description of project.',
'Miscellaneous'),
]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/dev', None),
'numpy': ('https://numpy.org/devdocs', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org', None)
}
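# Hedged note (added): with the mapping above, NEP documents can cross-reference
# the mapped projects directly from reST, e.g. ``:class:`numpy.ndarray``` or
# ``:func:`scipy.signal.convolve```; Sphinx resolves these roles against the
# remote ``objects.inv`` inventories listed in ``intersphinx_mapping``.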
| bsd-3-clause |
rishikksh20/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 85 | 5728 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). The LSHForest
index has a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but
typically reaches a plateau for queries on datasets with millions of
samples and a few hundred dimensions. Higher-dimensional datasets tend to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
    # Initialize LSHForest for 10-nearest-neighbors queries
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[[rng.randint(0, n_queries)]]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
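# Hedged addition (not part of the original example): the accuracy curve above
# is precision@10 computed with np.in1d inside the benchmark loop; factored
# out, the metric is simply:
def precision_at_k(approx_neighbors, exact_neighbors):
    """Fraction of approximate neighbors that are also exact neighbors."""
    return np.in1d(approx_neighbors, exact_neighbors).mean()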
| bsd-3-clause |
teonlamont/mne-python | examples/inverse/plot_label_source_activations.py | 8 | 2269 | """
====================================================
Extracting the time series of activations in a label
====================================================
We first apply a dSPM inverse operator to get signed activations in a label
(with positive and negative values) and we then compare different strategies
to average the times series in a label. We compare a simple average, with an
averaging using the dipoles normal (flip mode) and then a PCA,
also using a sign flip.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
print(__doc__)
data_path = sample.data_path()
label = 'Aud-lh'
label_fname = data_path + '/MEG/sample/labels/%s.label' % label
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']
# Compute inverse solution
pick_ori = "normal"  # Get signed values to see the effect of sign flip
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori=pick_ori)
label = mne.read_label(label_fname)
stc_label = stc.in_label(label)
mean = stc.extract_label_time_course(label, src, mode='mean')
mean_flip = stc.extract_label_time_course(label, src, mode='mean_flip')
pca = stc.extract_label_time_course(label, src, mode='pca_flip')
print("Number of vertices : %d" % len(stc_label.data))
# View source activations
plt.figure()
plt.plot(1e3 * stc_label.times, stc_label.data.T, 'k', linewidth=0.5)
h0, = plt.plot(1e3 * stc_label.times, mean.T, 'r', linewidth=3)
h1, = plt.plot(1e3 * stc_label.times, mean_flip.T, 'g', linewidth=3)
h2, = plt.plot(1e3 * stc_label.times, pca.T, 'b', linewidth=3)
plt.legend([h0, h1, h2], ['mean', 'mean flip', 'PCA flip'])
plt.xlabel('Time (ms)')
plt.ylabel('Source amplitude')
plt.title('Activations in Label : %s' % label)
plt.show()
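# Hedged sketch (added; not part of the original example): conceptually,
# 'mean_flip' aligns the sign of each source time course with the dominant
# dipole orientation in the label before averaging, so anti-parallel dipoles
# do not cancel.  A minimal numpy illustration on arbitrary data:
def mean_flip_sketch(label_data, flips):
    """label_data: (n_vertices, n_times) array; flips: +1/-1 per vertex."""
    import numpy as np
    flips = np.asarray(flips)[:, np.newaxis]
    return np.mean(flips * label_data, axis=0)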
| bsd-3-clause |
themrmax/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 46 | 3387 | import numpy as np
from sklearn.utils import testing
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils.testing import assert_equal, assert_array_equal
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
# The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
# The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
def test_kw_arg():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
# Test that rounding is correct
assert_array_equal(F.transform(X),
np.around(X, decimals=3))
def test_kw_arg_update():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args['decimals'] = 1
# Test that rounding is correct
assert_array_equal(F.transform(X), np.around(X, decimals=1))
def test_kw_arg_reset():
X = np.linspace(0, 1, num=10).reshape((5, 2))
F = FunctionTransformer(np.around, kw_args=dict(decimals=3))
F.kw_args = dict(decimals=1)
# Test that rounding is correct
assert_array_equal(F.transform(X), np.around(X, decimals=1))
def test_inverse_transform():
X = np.array([1, 4, 9, 16]).reshape((2, 2))
# Test that inverse_transform works correctly
F = FunctionTransformer(
func=np.sqrt,
inverse_func=np.around, inv_kw_args=dict(decimals=3),
)
assert_array_equal(
F.inverse_transform(F.transform(X)),
np.around(np.sqrt(X), decimals=3),
)
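# Hedged usage sketch (added; not one of the original tests): FunctionTransformer
# is commonly used as a stateless preprocessing step inside a Pipeline, for
# example a log1p transform in front of an estimator:
def _pipeline_usage_sketch():
    from sklearn.pipeline import Pipeline
    from sklearn.linear_model import Ridge
    return Pipeline([
        ('log', FunctionTransformer(np.log1p)),
        ('model', Ridge()),
    ])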
| bsd-3-clause |
kastnerkyle/speech_density | speech_lstmrbm.py | 1 | 18687 | # Author: Kratarth Goel
# BITS Pilani (2014)
# LSTM-RBM for music generation
import sys
import os
import tables
import tarfile
import fnmatch
import random
import numpy
import numpy as np
from scipy.io import wavfile
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from midify import lpc_analysis, lpc_to_lsf, lpc_synthesis
from midify import lsf_to_lpc, soundsc
from sklearn.cluster import KMeans
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Don't use a python long as this doesn't work on 32-bit computers.
numpy.random.seed(0xbeef)
rng = RandomStreams(seed=numpy.random.randint(1 << 30))
theano.config.warn.subtensor_merge_bug = False
def load_fruitspeech():
# Check if dataset is in the data directory.
data_path = os.path.join(os.path.split(__file__)[0], "data")
if not os.path.exists(data_path):
os.makedirs(data_path)
dataset = 'audio.tar.gz'
data_file = os.path.join(data_path, dataset)
if os.path.isfile(data_file):
dataset = data_file
if not os.path.isfile(data_file):
try:
import urllib
urllib.urlretrieve('http://google.com')
url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz'
except AttributeError:
import urllib.request as urllib
url = 'https://dl.dropboxusercontent.com/u/15378192/audio.tar.gz'
print('Downloading data from %s' % url)
urllib.urlretrieve(url, data_file)
print('... loading data')
if not os.path.exists(os.path.join(data_path, "audio")):
tar = tarfile.open(data_file)
os.chdir(data_path)
tar.extractall()
tar.close()
h5_file_path = os.path.join(data_path, "saved_fruit.h5")
if not os.path.exists(h5_file_path):
data_path = os.path.join(data_path, "audio")
audio_matches = []
for root, dirnames, filenames in os.walk(data_path):
for filename in fnmatch.filter(filenames, '*.wav'):
audio_matches.append(os.path.join(root, filename))
random.seed(1999)
random.shuffle(audio_matches)
# http://mail.scipy.org/pipermail/numpy-discussion/2011-March/055219.html
h5_file = tables.openFile(h5_file_path, mode='w')
data_x = h5_file.createVLArray(h5_file.root, 'data_x',
tables.Float32Atom(shape=()),
filters=tables.Filters(1))
data_y = h5_file.createVLArray(h5_file.root, 'data_y',
tables.Int32Atom(shape=()),
filters=tables.Filters(1))
for wav_path in audio_matches:
# Convert chars to int classes
word = wav_path.split(os.sep)[-1][:-6]
chars = [ord(c) - 97 for c in word]
data_y.append(np.array(chars, dtype='int32'))
fs, d = wavfile.read(wav_path)
d = d.astype('float64') / (2 ** 15)
# Preprocessing from A. Graves "Towards End-to-End Speech
# Recognition"
data_x.append(d.astype('float32'))
h5_file.close()
h5_file = tables.openFile(h5_file_path, mode='r')
data_x = h5_file.root.data_x
data_y = h5_file.root.data_y
# FIXME: HACKING
train_x = data_x
train_y = data_y
valid_x = data_x
valid_y = data_y
test_x = data_x
test_y = data_y
rval = [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)]
return rval
def fast_dropout(rng, x):
""" Multiply activations by N(1,1) """
mask = rng.normal(size=x.shape, avg=1., dtype=theano.config.floatX)
return x * mask
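# Hedged aside (added): "fast dropout" replaces Bernoulli masking with
# multiplicative Gaussian noise of mean 1, so the expected activation is
# unchanged.  A plain-numpy analogue of the Theano helper above:
def fast_dropout_numpy(x, rng_np=numpy.random):
    """ Multiply activations by N(1,1) noise, numpy version """
    return x * rng_np.normal(loc=1.0, scale=1.0, size=x.shape)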
def build_rbm(v, W, bv, bh, k):
'''Construct a k-step Gibbs chain starting at v for an RBM.
v : Theano vector or matrix
If a matrix, multiple chains will be run in parallel (batch).
W : Theano matrix
Weight matrix of the RBM.
bv : Theano vector
Visible bias vector of the RBM.
bh : Theano vector
Hidden bias vector of the RBM.
k : scalar or Theano scalar
Length of the Gibbs chain.
Return a (v_sample, cost, monitor, updates) tuple:
v_sample : Theano vector or matrix with the same shape as `v`
Corresponds to the generated sample(s).
cost : Theano scalar
Expression whose gradient with respect to W, bv, bh is the CD-k approximation
to the log-likelihood of `v` (training example) under the RBM.
The cost is averaged in the batch case.
monitor: Theano scalar
Pseudo log-likelihood (also averaged in the batch case).
updates: dictionary of Theano variable -> Theano variable
The `updates` object returned by scan.'''
def gibbs_step(v):
mean_h = T.nnet.sigmoid(T.dot( fast_dropout( rng, v) , W) + bh)
h = rng.binomial(size=mean_h.shape, n=1, p=mean_h,
dtype=theano.config.floatX)
mean_v = T.nnet.sigmoid(T.dot( fast_dropout(rng, h) , W.T) + bv)
v = rng.binomial(size=mean_v.shape, n=1, p=mean_v,
dtype=theano.config.floatX)
return mean_v, v
chain, updates = theano.scan(lambda v: gibbs_step(v)[1], outputs_info=[v],
n_steps=k)
v_sample = chain[-1]
mean_v = gibbs_step(v_sample)[0]
monitor = T.xlogx.xlogy0(v, mean_v) + T.xlogx.xlogy0(1 - v, 1 - mean_v)
monitor = monitor.sum() / v.shape[0]
def free_energy(v):
return -(v * bv).sum() - T.log(1 + T.exp(T.dot(v, W) + bh)).sum()
cost = (free_energy(v) - free_energy(v_sample)) / v.shape[0]
return v_sample, cost, monitor, updates
def shared_normal(num_rows, num_cols, scale=1):
'''Initialize a matrix shared variable with normally distributed
elements.'''
return theano.shared(numpy.random.normal(
scale=scale, size=(num_rows, num_cols)).astype(theano.config.floatX))
def shared_zeros(*shape):
'''Initialize a vector shared variable with zero elements.'''
return theano.shared(numpy.zeros(shape, dtype=theano.config.floatX))
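# Editor-added, hedged sketch (not part of the original script): one way to
# turn build_rbm's (v_sample, cost, monitor, updates) tuple into a compiled
# CD-k training step, mirroring how the LstmRbm class wires its updates below.
# Layer sizes, k, and the learning rate are illustrative; the helper is never
# called.
def _example_rbm_training_step(n_visible=100, n_hidden=50, lr=0.001):
    """Compile a single CD-k gradient step for a stand-alone RBM."""
    v = T.matrix('v')
    W = shared_normal(n_visible, n_hidden, 0.01)
    bv = shared_zeros(n_visible)
    bh = shared_zeros(n_hidden)
    v_sample, cost, monitor, updates = build_rbm(v, W, bv, bh, k=15)
    params = [W, bv, bh]
    # v_sample is held constant so the gradient is the CD-k approximation.
    grads = T.grad(cost, params, consider_constant=[v_sample])
    updates.update((p, p - lr * g) for p, g in zip(params, grads))
    # monitor is the batch-averaged pseudo log-likelihood proxy from build_rbm.
    return theano.function([v], monitor, updates=updates)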
def build_lstmrbm(n_visible, n_hidden, n_hidden_recurrent):
'''Construct a symbolic RNN-RBM and initialize parameters.
n_visible : integer
Number of visible units.
n_hidden : integer
Number of hidden units of the conditional RBMs.
n_hidden_recurrent : integer
Number of hidden units of the RNN.
Return a (v, v_sample, cost, monitor, params, updates_train, v_t,
updates_generate) tuple:
v : Theano matrix
Symbolic variable holding an input sequence (used during training)
v_sample : Theano matrix
Symbolic variable holding the negative particles for CD log-likelihood
gradient estimation (used during training)
cost : Theano scalar
Expression whose gradient (considering v_sample constant) corresponds to the
LL gradient of the RNN-RBM (used during training)
monitor : Theano scalar
Frame-level pseudo-likelihood (useful for monitoring during training)
params : tuple of Theano shared variables
The parameters of the model to be optimized during training.
updates_train : dictionary of Theano variable -> Theano variable
Update object that should be passed to theano.function when compiling the
training function.
v_t : Theano matrix
Symbolic variable holding a generated sequence (used during sampling)
updates_generate : dictionary of Theano variable -> Theano variable
Update object that should be passed to theano.function when compiling the
generation function.'''
W = shared_normal(n_visible, n_hidden, 0.01)
bv = shared_zeros(n_visible)
bh = shared_zeros(n_hidden)
Wuh = shared_normal(n_hidden_recurrent, n_hidden, 0.0001)
Wuv = shared_normal(n_hidden_recurrent, n_visible, 0.0001)
Wvu = shared_normal(n_visible, n_hidden_recurrent, 0.0001)
Wuu = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
bu = shared_zeros(n_hidden_recurrent)
Wui = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
Wqi = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
Wci = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
bi = shared_zeros(n_hidden_recurrent)
Wuf = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
Wqf = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
Wcf = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
bf = shared_zeros(n_hidden_recurrent)
Wuc = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
Wqc = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
bc = shared_zeros(n_hidden_recurrent)
Wuo = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
Wqo = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
Wco = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
Wqv = shared_normal(n_hidden_recurrent, n_visible, 0.0001)
Wqh = shared_normal(n_hidden_recurrent, n_hidden, 0.0001)
bo = shared_zeros(n_hidden_recurrent)
    params = (W, bv, bh, Wuh, Wuv, Wvu, Wuu, bu, Wui, Wqi, Wci, bi,
              Wuf, Wqf, Wcf, bf, Wuc, Wqc, bc, Wuo, Wqo, Wco, bo, Wqv, Wqh)
# learned parameters as shared
# variables
v = T.matrix() # a training sequence
u0 = T.zeros((n_hidden_recurrent,)) # initial value for the RNN hidden
# units
q0 = T.zeros((n_hidden_recurrent,))
c0 = T.zeros((n_hidden_recurrent,))
# If `v_t` is given, deterministic recurrence to compute the variable
# biases bv_t, bh_t at each time step. If `v_t` is None, same recurrence
# but with a separate Gibbs chain at each time step to sample (generate)
# from the RNN-RBM. The resulting sample v_t is returned in order to be
# passed down to the sequence history.
def recurrence(v_t, u_tm1, q_tm1, c_tm1):
bv_t = bv + T.dot(u_tm1, Wuv) + T.dot( q_tm1, Wqv)
bh_t = bh + T.dot(u_tm1, Wuh) + T.dot( q_tm1, Wqh)
generate = v_t is None
if generate:
v_t, _, _, updates = build_rbm(T.zeros((n_visible,)), W, bv_t,
bh_t, k=25)
u_t = T.tanh(bu + T.dot(v_t, Wvu) + T.dot(u_tm1, Wuu))
i_t = T.tanh(bi + T.dot(c_tm1, Wci) + T.dot(q_tm1, Wqi) + T.dot(u_t, Wui))
f_t = T.tanh(bf + T.dot(c_tm1, Wcf) + T.dot(q_tm1, Wqf) + T.dot(u_t, Wuf))
c_t = (f_t * c_tm1) + ( i_t * T.tanh( T.dot(u_t, Wuc) + T.dot( q_tm1, Wqc) + bc ))
o_t = T.tanh(bo + T.dot(c_t, Wco) + T.dot(q_tm1, Wqo) + T.dot(u_t, Wuo))
q_t = o_t * T.tanh( c_t )
return ([v_t, u_t, q_t, c_t], updates) if generate else [u_t, q_t, c_t, bv_t, bh_t]
# For training, the deterministic recurrence is used to compute all the
# {bv_t, bh_t, 1 <= t <= T} given v. Conditional RBMs can then be trained
# in batches using those parameters.
(u_t, q_t, c_t, bv_t, bh_t), updates_train = theano.scan(
lambda v_t, u_tm1, q_tm1, c_tm1, *_: recurrence(v_t, u_tm1, q_tm1, c_tm1),
sequences=v, outputs_info=[u0, q0, c0, None, None], non_sequences=params)
v_sample, cost, monitor, updates_rbm = build_rbm(v, W, bv_t[:], bh_t[:],
k=15)
updates_train.update(updates_rbm)
# symbolic loop for sequence generation
(v_t, u_t, q_t, c_t), updates_generate = theano.scan(
lambda u_tm1, q_tm1, c_tm1, *_: recurrence(None, u_tm1, q_tm1, c_tm1),
outputs_info=[None, u0, q0, c0], non_sequences=params, n_steps=200)
return (v, v_sample, cost, monitor, params, updates_train, v_t,
updates_generate)
class LstmRbm:
'''Simple class to train an RNN-RBM from MIDI files and to generate sample
sequences.'''
def __init__(self, n_vis, n_hidden=150, n_hidden_recurrent=100, lr=0.001):
'''Constructs and compiles Theano functions for training and sequence
generation.
n_hidden : integer
Number of hidden units of the conditional RBMs.
n_hidden_recurrent : integer
Number of hidden units of the RNN.
lr : float
Learning rate
r : (integer, integer) tuple
Specifies the pitch range of the piano-roll in MIDI note numbers, including
r[0] but not r[1], such that r[1]-r[0] is the number of visible units of the
RBM at a given time step. The default (21, 109) corresponds to the full range
of piano (88 notes).
dt : float
Sampling period when converting the MIDI files into piano-rolls, or
equivalently the time difference between consecutive time steps.'''
(v, v_sample, cost, monitor, params, updates_train, v_t,
updates_generate) = build_lstmrbm(n_vis, n_hidden,
n_hidden_recurrent)
gradient = T.grad(cost, params, consider_constant=[v_sample])
updates_train.update(((p, p - lr * g) for p, g in zip(params,
gradient)))
self.train_function = theano.function([v], monitor,
updates=updates_train)
self.generate_function = theano.function([], v_t,
updates=updates_generate)
def train(self, dataset, batch_size=100, num_epochs=200):
print len(dataset)
print len(dataset[0])
print (dataset[0])
try:
for epoch in xrange(num_epochs):
numpy.random.shuffle(dataset)
costs = []
for s, sequence in enumerate(dataset):
for i in xrange(0, len(sequence), batch_size):
cost = self.train_function(sequence[i:i + batch_size])
costs.append(cost)
print 'Epoch %i/%i' % (epoch + 1, num_epochs),
print numpy.mean(costs)
sys.stdout.flush()
except KeyboardInterrupt:
print 'Interrupted by user.'
def generate(self):
generated = self.generate_function()
return generated
def test_lstmrbm(X, batch_size=100, num_epochs=200):
model = LstmRbm(X[0].shape[1], n_hidden=300, n_hidden_recurrent=200)
model.train(X, batch_size=batch_size, num_epochs=num_epochs)
return model
if __name__ == '__main__':
train, valid, test = load_fruitspeech()
train_x, train_y = train
valid_x, valid_y = valid
test_x, test_y = test
# load into main memory and normalize between -1 and 1
train_x = [x / (2. ** 15) for x in train_x[:]]
fs = 8000
lpc_order = 8
window_size = 128
window_step = window_size
lsf_kmeans_samples = []
gain_kmeans_samples = []
for n, d in enumerate(train_x):
a, gain, e = lpc_analysis(d, order=lpc_order,
window_step=window_step,
window_size=window_size, emphasis=0.9,
copy=True)
lsf = lpc_to_lsf(a)
length, n_features = lsf.shape
slice_sz = 10
lo = slice_sz // 2
hi = len(lsf) - slice_sz // 2
        if lo >= hi:
            # Sequence too short to draw a slice window from; skip it.
            continue
r = np.random.randint(lo, hi)
lsf_kmeans_samples.append(
lsf[r - slice_sz // 2:r + slice_sz // 2])
gain_kmeans_samples.append(
gain[r - slice_sz // 2:r + slice_sz // 2])
lsf_kmeans_samples = np.array(lsf_kmeans_samples)
lsf_kmeans_samples = lsf_kmeans_samples.reshape(
lsf_kmeans_samples.shape[0] * lsf_kmeans_samples.shape[1], -1)
gain_kmeans_samples = np.array(gain_kmeans_samples)
gain_kmeans_samples = gain_kmeans_samples.reshape(
gain_kmeans_samples.shape[0] * gain_kmeans_samples.shape[1], -1)
print("Fitting Kmeans...")
n_lsf_clusters = 100
lsf_tf = KMeans(n_clusters=n_lsf_clusters, random_state=1999)
lsf_tf.fit(lsf_kmeans_samples)
n_gain_clusters = 100
gain_tf = KMeans(n_clusters=n_gain_clusters, random_state=1999)
gain_tf.fit(gain_kmeans_samples)
print("Generating dataset")
X = []
for n, d in enumerate(train_x):
a, gain, e = lpc_analysis(d, order=lpc_order,
window_step=window_step,
window_size=window_size, emphasis=0.9,
copy=True)
lsf = lpc_to_lsf(a)
length, n_features = lsf.shape
lsf_labels = lsf_tf.predict(lsf)
lsf_one_hot = np.zeros((length, n_lsf_clusters)).astype(
theano.config.floatX)
gain_labels = gain_tf.predict(gain)
gain_one_hot = np.zeros((length, n_gain_clusters)).astype(
theano.config.floatX)
for i in np.arange(length):
lsf_one_hot[i, lsf_labels[i]] = 1.
gain_one_hot[i, gain_labels[i]] = .1
one_hot = np.concatenate((lsf_one_hot, gain_one_hot), axis=1)
X.append(one_hot)
def reconstruct(codebook, one_hot, n_features):
arr = np.zeros((len(one_hot), n_features))
prev_code = None
for i in range(len(arr)):
idx = np.where(one_hot[i])[0]
code = codebook[idx]
if len(idx) > 1 and prev_code is not None:
if prev_code in code:
print("Using previous code")
code = prev_code
else:
code = code.mean(axis=0)
elif len(idx) == 1:
code = codebook[idx]
else:
if prev_code is not None:
# If there are none active just use the previous
code = prev_code
else:
# Very first sample is messed up... just pick one
code = codebook[0]
arr[i] = code
prev_code = code
return arr
for i in [0, 20, 40, 60, 80, 100]:
lsf = reconstruct(lsf_tf.cluster_centers_, X[i][:, :n_lsf_clusters],
n_features)
a = lsf_to_lpc(lsf)
gain = reconstruct(gain_tf.cluster_centers_, X[i][:, -n_gain_clusters:],
1)
X_s = lpc_synthesis(a, gain, window_step=window_step)
wavfile.write('orig_%i.wav' % i, fs, soundsc(X_s))
model = test_lstmrbm(X, num_epochs=200)
n_samples_to_gen = 5
for i in range(n_samples_to_gen):
g = model.generate()
lsf = reconstruct(lsf_tf.cluster_centers_, g[:, :n_lsf_clusters],
n_features)
a = lsf_to_lpc(lsf)
gain = reconstruct(gain_tf.cluster_centers_, g[:, -n_gain_clusters:],
1)
X_s = lpc_synthesis(a, gain, window_step=window_step)
wavfile.write('sample_%i.wav' % i, fs, soundsc(X_s))
| bsd-3-clause |
Horta/limix | limix/conftest.py | 1 | 1544 | from __future__ import unicode_literals
import pytest
def pytest_sessionstart(*args, **kwargs):
import doctest
import matplotlib as mpl
mpl.use("agg")
_compatibility()
pandas_format()
doctest.ELLIPSIS_MARKER = "-ignore-"
@pytest.fixture(autouse=True)
def _docdir(request):
import os
# Trigger ONLY for the doctests or doctestplus.
plug = request.config.pluginmanager.getplugin("doctest")
if plug is None:
plug = request.config.pluginmanager.getplugin("doctestplus")
if plug is None:
item = None
else:
item = plug._doctest_textfile_item_cls
else:
item = plug.DoctestItem
if isinstance(request.node, item):
# Get the fixture dynamically by its name.
tmpdir = request.getfixturevalue("tmpdir")
# Chdir only for the duration of the test.
olddir = os.getcwd()
tmpdir.chdir()
yield
os.chdir(olddir)
else:
# For normal tests, we have to yield, since this is a yield-fixture.
yield
def pandas_format():
import pandas as pd
pd.set_option("display.width", 88)
pd.set_option("display.max_columns", 79)
pd.set_option("display.max_rows", 60)
pd.set_option("display.large_repr", "truncate")
pd.set_option("display.float_format", "{:8.5f}".format)
def _compatibility():
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
| apache-2.0 |
marcocaccin/scikit-learn | sklearn/linear_model/tests/test_base.py | 8 | 12163 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data, _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_linear_regression_sample_weights():
rng = np.random.RandomState(0)
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
clf = LinearRegression()
clf.fit(X, y, sample_weight)
coefs1 = clf.coef_
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_greater(clf.score(X, y), 0.9)
assert_array_almost_equal(clf.predict(X), y)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
scaled_y = y * np.sqrt(sample_weight)
scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
clf.fit(X, y)
coefs2 = clf.coef_
assert_array_almost_equal(coefs1, coefs2)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
clf = LinearRegression()
# make sure the "OK" sample weights actually work
clf.fit(X, y, sample_weights_OK)
clf.fit(X, y, sample_weights_OK_1)
clf.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
rng = np.random.RandomState(0)
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
| bsd-3-clause |
mdaniel/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_thrift.py | 9 | 22067 | """Contains methods for building Thrift structures for interacting with IDE
The methods from this file are used for Python console interaction. Please
note that the debugger still uses XML structures with the similar methods
contained in `pydevd_xml.py` file.
"""
import sys
import traceback
from _pydev_bundle import pydev_log
from _pydevd_bundle import pydevd_extension_utils
from _pydevd_bundle import pydevd_resolver
from _pydevd_bundle.pydevd_constants import dict_iter_items, dict_keys, IS_PY3K, \
MAXIMUM_VARIABLE_REPRESENTATION_SIZE, RETURN_VALUES_DICT, LOAD_VALUES_POLICY, DEFAULT_VALUES_DICT, NUMPY_NUMERIC_TYPES
from _pydevd_bundle.pydevd_extension_api import TypeResolveProvider, StrPresentationProvider
from _pydevd_bundle.pydevd_utils import take_first_n_coll_elements, is_pandas_container, is_string, pandas_to_str, \
should_evaluate_full_value, should_evaluate_shape
from _pydevd_bundle.pydevd_vars import get_label, array_default_format, is_able_to_format_number, MAXIMUM_ARRAY_SIZE, \
get_column_formatter_by_type, get_formatted_row_elements, DEFAULT_DF_FORMAT, DATAFRAME_HEADER_LOAD_MAX_SIZE
from pydev_console.pydev_protocol import DebugValue, GetArrayResponse, ArrayData, ArrayHeaders, ColHeader, RowHeader, \
UnsupportedArrayTypeException, ExceedingArrayDimensionsException
try:
import types
frame_type = types.FrameType
except:
frame_type = None
class ExceptionOnEvaluate:
def __init__(self, result):
self.result = result
_IS_JYTHON = sys.platform.startswith("java")
def _create_default_type_map():
if not _IS_JYTHON:
default_type_map = [
# None means that it should not be treated as a compound variable
            # isinstance does not accept a tuple on some versions of python, so, we must declare it expanded
(type(None), None,),
(int, None),
(float, None),
(complex, None),
(str, None),
(tuple, pydevd_resolver.tupleResolver),
(list, pydevd_resolver.tupleResolver),
(dict, pydevd_resolver.dictResolver),
]
try:
default_type_map.append((long, None)) # @UndefinedVariable
except:
pass # not available on all python versions
try:
default_type_map.append((unicode, None)) # @UndefinedVariable
except:
pass # not available on all python versions
try:
default_type_map.append((set, pydevd_resolver.setResolver))
except:
pass # not available on all python versions
try:
default_type_map.append((frozenset, pydevd_resolver.setResolver))
except:
pass # not available on all python versions
try:
from django.utils.datastructures import MultiValueDict
default_type_map.insert(0, (MultiValueDict, pydevd_resolver.multiValueDictResolver))
# we should put it before dict
except:
pass # django may not be installed
try:
from django.forms import BaseForm
default_type_map.insert(0, (BaseForm, pydevd_resolver.djangoFormResolver))
# we should put it before instance resolver
except:
pass # django may not be installed
try:
from collections import deque
default_type_map.append((deque, pydevd_resolver.dequeResolver))
except:
pass
if frame_type is not None:
default_type_map.append((frame_type, pydevd_resolver.frameResolver))
else:
from org.python import core # @UnresolvedImport
default_type_map = [
(core.PyNone, None),
(core.PyInteger, None),
(core.PyLong, None),
(core.PyFloat, None),
(core.PyComplex, None),
(core.PyString, None),
(core.PyTuple, pydevd_resolver.tupleResolver),
(core.PyList, pydevd_resolver.tupleResolver),
(core.PyDictionary, pydevd_resolver.dictResolver),
(core.PyStringMap, pydevd_resolver.dictResolver),
]
if hasattr(core, 'PyJavaInstance'):
# Jython 2.5b3 removed it.
default_type_map.append((core.PyJavaInstance, pydevd_resolver.instanceResolver))
return default_type_map
class TypeResolveHandler(object):
NO_PROVIDER = [] # Sentinel value (any mutable object to be used as a constant would be valid).
def __init__(self):
# Note: don't initialize with the types we already know about so that the extensions can override
# the default resolvers that are already available if they want.
self._type_to_resolver_cache = {}
self._type_to_str_provider_cache = {}
self._initialized = False
def _initialize(self):
self._default_type_map = _create_default_type_map()
self._resolve_providers = pydevd_extension_utils.extensions_of_type(TypeResolveProvider)
self._str_providers = pydevd_extension_utils.extensions_of_type(StrPresentationProvider)
self._initialized = True
def get_type(self, o):
try:
try:
# Faster than type(o) as we don't need the function call.
type_object = o.__class__
except:
# Not all objects have __class__ (i.e.: there are bad bindings around).
type_object = type(o)
type_name = type_object.__name__
except:
# This happens for org.python.core.InitModule
return 'Unable to get Type', 'Unable to get Type', None
return self._get_type(o, type_object, type_name)
def _get_type(self, o, type_object, type_name):
resolver = self._type_to_resolver_cache.get(type_object)
if resolver is not None:
return type_object, type_name, resolver
if not self._initialized:
self._initialize()
try:
for resolver in self._resolve_providers:
if resolver.can_provide(type_object, type_name):
# Cache it
self._type_to_resolver_cache[type_object] = resolver
return type_object, type_name, resolver
for t in self._default_type_map:
if isinstance(o, t[0]):
# Cache it
resolver = t[1]
self._type_to_resolver_cache[type_object] = resolver
return (type_object, type_name, resolver)
except:
traceback.print_exc()
# No match return default (and cache it).
resolver = pydevd_resolver.defaultResolver
self._type_to_resolver_cache[type_object] = resolver
return type_object, type_name, resolver
if _IS_JYTHON:
_base_get_type = _get_type
def _get_type(self, o, type_object, type_name):
if type_name == 'org.python.core.PyJavaInstance':
return type_object, type_name, pydevd_resolver.instanceResolver
if type_name == 'org.python.core.PyArray':
return type_object, type_name, pydevd_resolver.jyArrayResolver
            return self._base_get_type(o, type_object, type_name)
def str_from_providers(self, o, type_object, type_name):
provider = self._type_to_str_provider_cache.get(type_object)
if provider is self.NO_PROVIDER:
return None
if provider is not None:
return provider.get_str(o)
if not self._initialized:
self._initialize()
for provider in self._str_providers:
if provider.can_provide(type_object, type_name):
self._type_to_str_provider_cache[type_object] = provider
return provider.get_str(o)
self._type_to_str_provider_cache[type_object] = self.NO_PROVIDER
return None
_TYPE_RESOLVE_HANDLER = TypeResolveHandler()
"""
def get_type(o):
Receives object and returns a triple (typeObject, typeString, resolver).
resolver != None means that variable is a container, and should be displayed as a hierarchy.
Use the resolver to get its attributes.
All container objects should have a resolver.
"""
get_type = _TYPE_RESOLVE_HANDLER.get_type
_str_from_providers = _TYPE_RESOLVE_HANDLER.str_from_providers
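# Editor-added, hedged illustration (not part of pydevd): demonstrates the
# (type_object, type_name, resolver) triple returned by get_type. A non-None
# resolver is what marks a value as a container in var_to_struct below. The
# helper is never called; the sample values are illustrative only.
def _example_get_type_usage():
    """Show the triple returned by get_type for a container and a scalar."""
    type_object, type_name, resolver = get_type([1, 2, 3])
    assert type_name == 'list' and resolver is not None  # lists are containers
    type_object, type_name, resolver = get_type(42)
    assert resolver is None  # scalars are rendered directly, not expanded
    return type_object, type_name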
def frame_vars_to_struct(frame_f_locals, hidden_ns=None):
"""Returns frame variables as the list of `DebugValue` structures
"""
values = []
keys = dict_keys(frame_f_locals)
if hasattr(keys, 'sort'):
keys.sort() # Python 3.0 does not have it
else:
keys = sorted(keys) # Jython 2.1 does not have it
return_values = []
for k in keys:
try:
v = frame_f_locals[k]
eval_full_val = should_evaluate_full_value(v)
if k == RETURN_VALUES_DICT:
for name, val in dict_iter_items(v):
value = var_to_struct(val, name)
value.isRetVal = True
return_values.append(value)
else:
if hidden_ns is not None and k in hidden_ns:
value = var_to_struct(v, str(k), evaluate_full_value=eval_full_val)
value.isIPythonHidden = True
values.append(value)
else:
value = var_to_struct(v, str(k), evaluate_full_value=eval_full_val)
values.append(value)
except Exception:
traceback.print_exc()
pydev_log.error("Unexpected error, recovered safely.\n")
# Show return values as the first entry.
return return_values + values
def var_to_struct(val, name, format='%s', do_trim=True, evaluate_full_value=True):
""" single variable or dictionary to Thrift struct representation """
debug_value = DebugValue()
try:
# This should be faster than isinstance (but we have to protect against not having a '__class__' attribute).
is_exception_on_eval = val.__class__ == ExceptionOnEvaluate
except:
is_exception_on_eval = False
if is_exception_on_eval:
v = val.result
else:
v = val
_type, typeName, resolver = get_type(v)
type_qualifier = getattr(_type, "__module__", "")
if not evaluate_full_value:
value = DEFAULT_VALUES_DICT[LOAD_VALUES_POLICY]
else:
try:
str_from_provider = _str_from_providers(v, _type, typeName)
if str_from_provider is not None:
value = str_from_provider
elif hasattr(v, '__class__'):
if v.__class__ == frame_type:
value = pydevd_resolver.frameResolver.get_frame_name(v)
elif v.__class__ in (list, tuple):
if len(v) > pydevd_resolver.MAX_ITEMS_TO_HANDLE:
value = '%s' % take_first_n_coll_elements(
v, pydevd_resolver.MAX_ITEMS_TO_HANDLE)
value = value.rstrip(')]}') + '...'
else:
value = '%s' % str(v)
else:
value = format % v
else:
value = str(v)
except:
try:
value = repr(v)
except:
value = 'Unable to get repr for %s' % v.__class__
debug_value.name = name
debug_value.type = typeName
if type_qualifier:
debug_value.qualifier = type_qualifier
# cannot be too big... communication may not handle it.
if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE and do_trim:
value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]
value += '...'
# fix to work with unicode values
try:
if not IS_PY3K:
if value.__class__ == unicode: # @UndefinedVariable
value = value.encode('utf-8')
else:
if value.__class__ == bytes:
                value = value.decode('utf-8', 'replace')
except TypeError: # in java, unicode is a function
pass
if is_pandas_container(type_qualifier, typeName, v):
value = pandas_to_str(v, typeName, value, pydevd_resolver.MAX_ITEMS_TO_HANDLE)
debug_value.value = value
try:
if should_evaluate_shape():
if hasattr(v, 'shape') and not callable(v.shape):
debug_value.shape = str(tuple(v.shape))
elif hasattr(v, '__len__') and not is_string(v):
debug_value.shape = str(len(v))
except:
pass
if is_exception_on_eval:
debug_value.isErrorOnEval = True
else:
if resolver is not None:
debug_value.isContainer = True
else:
pass
return debug_value
def var_to_str(val, format, do_trim=True, evaluate_full_value=True):
struct = var_to_struct(val, '', format, do_trim, evaluate_full_value)
value = struct.value
return value if value is not None else ''
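# Editor-added, hedged sketch (not part of pydevd): shows the plain-string
# rendering produced by var_to_str for a scalar and for a small container.
# The '%'-style format string follows what var_to_struct expects; the sample
# values are illustrative and the helper is never called.
def _example_var_to_str():
    """Render a float with an explicit format and a list with the default."""
    assert var_to_str(3.14159, '%.2f') == '3.14'
    # Small containers fall back to their str()-based rendering.
    return var_to_str([1, 2, 3], '%s')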
# from pydevd_vars.py
def array_to_thrift_struct(array, name, roffset, coffset, rows, cols, format):
"""
"""
array, array_chunk, r, c, f = array_to_meta_thrift_struct(array, name, format)
format = '%' + f
if rows == -1 and cols == -1:
rows = r
cols = c
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE)
# there is no obvious rule for slicing (at least 5 choices)
if len(array) == 1 and (rows > 1 or cols > 1):
array = array[0]
if array.size > len(array):
array = array[roffset:, coffset:]
rows = min(rows, len(array))
cols = min(cols, len(array[0]))
if len(array) == 1:
array = array[0]
elif array.size == len(array):
if roffset == 0 and rows == 1:
array = array[coffset:]
cols = min(cols, len(array))
elif coffset == 0 and cols == 1:
array = array[roffset:]
rows = min(rows, len(array))
def get_value(row, col):
value = array
if rows == 1 or cols == 1:
if rows == 1 and cols == 1:
value = array[0]
else:
value = array[(col if rows == 1 else row)]
if "ndarray" in str(type(value)):
value = value[0]
else:
value = array[row][col]
return value
array_chunk.data = array_data_to_thrift_struct(rows, cols, lambda r: (get_value(r, c) for c in range(cols)), format)
return array_chunk
def array_to_meta_thrift_struct(array, name, format):
type = array.dtype.kind
slice = name
l = len(array.shape)
# initial load, compute slice
if format == '%':
if l > 2:
slice += '[0]' * (l - 2)
for r in range(l - 2):
array = array[0]
if type == 'f':
format = '.5f'
elif type == 'i' or type == 'u':
format = 'd'
else:
format = 's'
else:
format = format.replace('%', '')
l = len(array.shape)
reslice = ""
if l > 2:
raise ExceedingArrayDimensionsException
elif l == 1:
# special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
# http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
# explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
# we use kind of a hack - get information about memory from C_CONTIGUOUS
is_row = array.flags['C_CONTIGUOUS']
if is_row:
rows = 1
cols = len(array)
if cols < len(array):
reslice = '[0:%s]' % (cols)
array = array[0:cols]
else:
cols = 1
rows = len(array)
if rows < len(array):
reslice = '[0:%s]' % (rows)
array = array[0:rows]
elif l == 2:
rows = array.shape[-2]
cols = array.shape[-1]
if cols < array.shape[-1] or rows < array.shape[-2]:
reslice = '[0:%s, 0:%s]' % (rows, cols)
array = array[0:rows, 0:cols]
# avoid slice duplication
if not slice.endswith(reslice):
slice += reslice
bounds = (0, 0)
if type in NUMPY_NUMERIC_TYPES and array.size != 0:
bounds = (array.min(), array.max())
array_chunk = GetArrayResponse()
array_chunk.slice = slice
array_chunk.rows = rows
array_chunk.cols = cols
array_chunk.format = "%" + format
array_chunk.type = type
array_chunk.max = "%s" % bounds[1]
array_chunk.min = "%s" % bounds[0]
return array, array_chunk, rows, cols, format
def dataframe_to_thrift_struct(df, name, roffset, coffset, rows, cols, format):
"""
:type df: pandas.core.frame.DataFrame
:type name: str
:type coffset: int
:type roffset: int
:type rows: int
:type cols: int
:type format: str
"""
original_df = df
dim = len(df.axes)
num_rows = df.shape[0]
num_cols = df.shape[1] if dim > 1 else 1
array_chunk = GetArrayResponse()
array_chunk.slice = name
array_chunk.rows = num_rows
array_chunk.cols = num_cols
array_chunk.type = ""
array_chunk.max = "0"
array_chunk.min = "0"
format = format.replace("%", "")
if not format:
if num_rows > 0 and num_cols == 1: # series or data frame with one column
try:
kind = df.dtype.kind
except AttributeError:
try:
kind = df.dtypes[0].kind
except (IndexError, KeyError):
kind = "O"
format = array_default_format(kind)
else:
format = array_default_format(DEFAULT_DF_FORMAT)
array_chunk.format = "%" + format
if (rows, cols) == (-1, -1):
rows, cols = num_rows, num_cols
elif (rows, cols) == (0, 0):
# return header only
r = min(num_rows, DATAFRAME_HEADER_LOAD_MAX_SIZE)
c = min(num_cols, DATAFRAME_HEADER_LOAD_MAX_SIZE)
array_chunk.headers = header_data_to_thrift_struct(r, c, [""] * num_cols, [(0, 0)] * num_cols, lambda x: DEFAULT_DF_FORMAT, original_df, dim)
array_chunk.data = array_data_to_thrift_struct(rows, cols, None, format)
return array_chunk
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)
# need to precompute column bounds here before slicing!
col_bounds = [None] * cols
dtypes = [None] * cols
if dim > 1:
for col in range(cols):
dtype = df.dtypes.iloc[coffset + col].kind
dtypes[col] = dtype
if dtype in NUMPY_NUMERIC_TYPES and df.size != 0:
cvalues = df.iloc[:, coffset + col]
bounds = (cvalues.min(), cvalues.max())
else:
bounds = (0, 0)
col_bounds[col] = bounds
else:
dtype = df.dtype.kind
dtypes[0] = dtype
col_bounds[0] = (df.min(), df.max()) if dtype in NUMPY_NUMERIC_TYPES and df.size != 0 else (0, 0)
df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]
rows = df.shape[0]
cols = df.shape[1] if dim > 1 else 1
def col_to_format(c):
return get_column_formatter_by_type(format, dtypes[c])
iat = df.iat if dim == 1 or len(df.columns.unique()) == len(df.columns) else df.iloc
def formatted_row_elements(row):
return get_formatted_row_elements(row, iat, dim, cols, format, dtypes)
array_chunk.headers = header_data_to_thrift_struct(rows, cols, dtypes, col_bounds, col_to_format, df, dim)
array_chunk.data = array_data_to_thrift_struct(rows, cols, formatted_row_elements, format)
return array_chunk
def array_data_to_thrift_struct(rows, cols, get_row, format):
array_data = ArrayData()
array_data.rows = rows
array_data.cols = cols
# `ArrayData.data`
data = []
for row in range(rows):
data.append([var_to_str(value, format) for value in get_row(row)])
array_data.data = data
return array_data
def header_data_to_thrift_struct(rows, cols, dtypes, col_bounds, col_to_format, df, dim):
array_headers = ArrayHeaders()
col_headers = []
for col in range(cols):
col_label = get_label(df.axes[1].values[col]) if dim > 1 else str(col)
bounds = col_bounds[col]
col_format = "%" + col_to_format(col)
col_header = ColHeader()
# col_header.index = col
col_header.label = col_label
col_header.type = dtypes[col]
col_header.format = col_to_format(col)
col_header.max = col_format % bounds[1]
col_header.min = col_format % bounds[0]
col_headers.append(col_header)
row_headers = []
for row in range(rows):
row_header = RowHeader()
row_header.index = row
row_header.label = get_label(df.axes[0].values[row])
row_headers.append(row_header)
array_headers.colHeaders = col_headers
array_headers.rowHeaders = row_headers
return array_headers
TYPE_TO_THRIFT_STRUCT_CONVERTERS = {
"ndarray": array_to_thrift_struct,
"DataFrame": dataframe_to_thrift_struct,
"Series": dataframe_to_thrift_struct,
"GeoDataFrame": dataframe_to_thrift_struct,
"GeoSeries": dataframe_to_thrift_struct
}
def table_like_struct_to_thrift_struct(array, name, roffset, coffset, rows, cols, format):
"""Returns `GetArrayResponse` structure for table-like structure
The `array` might be either `numpy.ndarray`, `pandas.DataFrame` or `pandas.Series`.
"""
_, type_name, _ = get_type(array)
format = format if is_able_to_format_number(format) else '%'
if type_name in TYPE_TO_THRIFT_STRUCT_CONVERTERS:
return TYPE_TO_THRIFT_STRUCT_CONVERTERS[type_name](array, name, roffset, coffset, rows, cols, format)
else:
raise UnsupportedArrayTypeException(type_name)
| apache-2.0 |
lin-credible/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.512/resnet-tpuv2-512/code/resnet/model/models/official/recommendation/data_preprocessing.py | 4 | 29600 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess dataset and construct any necessary artifacts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import contextlib
import gc
import hashlib
import multiprocessing
import json
import os
import pickle
import signal
import socket
import subprocess
import time
import timeit
import typing
# pylint: disable=wrong-import-order
from absl import app as absl_app
from absl import flags
import numpy as np
import pandas as pd
import six
import tensorflow as tf
# pylint: enable=wrong-import-order
from official.datasets import movielens
from official.recommendation import constants as rconst
from official.recommendation import stat_utils
from official.recommendation import popen_helper
from official.utils.logs import mlperf_helper
DATASET_TO_NUM_USERS_AND_ITEMS = {
"ml-1m": (6040, 3706),
"ml-20m": (138493, 26744)
}
# Number of batches to run per epoch when using synthetic data. At high batch
# sizes, we run for more batches than with real data, which is good since
# running more batches reduces noise when measuring the average batches/second.
SYNTHETIC_BATCHES_PER_EPOCH = 2000
class NCFDataset(object):
"""Container for training and testing data."""
def __init__(self, user_map, item_map, num_data_readers, cache_paths,
num_train_positives, deterministic=False):
# type: (dict, dict, int, rconst.Paths, int, bool) -> None
"""Assign key values for recommendation dataset.
Args:
user_map: Dict mapping raw user ids to regularized ids.
item_map: Dict mapping raw item ids to regularized ids.
num_data_readers: The number of reader Datasets used during training.
cache_paths: Object containing locations for various cache files.
num_train_positives: The number of positive training examples in the
dataset.
deterministic: Operations should use deterministic, order preserving
methods, even at the cost of performance.
"""
self.user_map = {int(k): int(v) for k, v in user_map.items()}
self.item_map = {int(k): int(v) for k, v in item_map.items()}
self.num_users = len(user_map)
self.num_items = len(item_map)
self.num_data_readers = num_data_readers
self.cache_paths = cache_paths
self.num_train_positives = num_train_positives
self.deterministic = deterministic
def _filter_index_sort(raw_rating_path, match_mlperf):
# type: (str, bool) -> (pd.DataFrame, dict, dict)
"""Read in data CSV, and output structured data.
This function reads in the raw CSV of positive items, and performs three
preprocessing transformations:
1) Filter out all users who have not rated at least a certain number
of items. (Typically 20 items)
2) Zero index the users and items such that the largest user_id is
`num_users - 1` and the largest item_id is `num_items - 1`
3) Sort the dataframe by user_id, with timestamp as a secondary sort key.
This allows the dataframe to be sliced by user in-place, and for the last
item to be selected simply by calling the `-1` index of a user's slice.
While all of these transformations are performed by Pandas (and are therefore
single-threaded), they only take ~2 minutes, and the overhead to apply a
MapReduce pattern to parallel process the dataset adds significant complexity
for no computational gain. For a larger dataset parallelizing this
preprocessing could yield speedups. (Also, this preprocessing step is only
  performed once for an entire run.)
Args:
raw_rating_path: The path to the CSV which contains the raw dataset.
match_mlperf: If True, change the sorting algorithm to match the MLPerf
reference implementation.
Returns:
A filtered, zero-index remapped, sorted dataframe, a dict mapping raw user
IDs to regularized user IDs, and a dict mapping raw item IDs to regularized
item IDs.
"""
with tf.gfile.Open(raw_rating_path) as f:
df = pd.read_csv(f)
# Get the info of users who have more than 20 ratings on items
grouped = df.groupby(movielens.USER_COLUMN)
df = grouped.filter(
lambda x: len(x) >= rconst.MIN_NUM_RATINGS) # type: pd.DataFrame
original_users = df[movielens.USER_COLUMN].unique()
original_items = df[movielens.ITEM_COLUMN].unique()
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.PREPROC_HP_MIN_RATINGS,
value=rconst.MIN_NUM_RATINGS)
# Map the ids of user and item to 0 based index for following processing
tf.logging.info("Generating user_map and item_map...")
user_map = {user: index for index, user in enumerate(original_users)}
item_map = {item: index for index, item in enumerate(original_items)}
df[movielens.USER_COLUMN] = df[movielens.USER_COLUMN].apply(
lambda user: user_map[user])
df[movielens.ITEM_COLUMN] = df[movielens.ITEM_COLUMN].apply(
lambda item: item_map[item])
num_users = len(original_users)
num_items = len(original_items)
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.PREPROC_HP_NUM_EVAL,
value=rconst.NUM_EVAL_NEGATIVES)
mlperf_helper.ncf_print(
key=mlperf_helper.TAGS.PREPROC_HP_SAMPLE_EVAL_REPLACEMENT,
value=match_mlperf)
assert num_users <= np.iinfo(np.int32).max
assert num_items <= np.iinfo(np.uint16).max
assert df[movielens.USER_COLUMN].max() == num_users - 1
assert df[movielens.ITEM_COLUMN].max() == num_items - 1
# This sort is used to shard the dataframe by user, and later to select
# the last item for a user to be used in validation.
tf.logging.info("Sorting by user, timestamp...")
if match_mlperf:
    # This sort is equivalent to the non-MLPerf sort, except that the order of
    # items with the same user and timestamp is sometimes different. For some
    # reason, this sort results in a better hit-rate during evaluation, matching
    # the performance of the MLPerf reference implementation.
df.sort_values(by=movielens.TIMESTAMP_COLUMN, inplace=True)
df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
inplace=True, kind="mergesort")
else:
df.sort_values([movielens.USER_COLUMN, movielens.TIMESTAMP_COLUMN],
inplace=True)
  df = df.reset_index()  # The dataframe does not reconstruct indices in the
                         # sort or filter steps.
return df, user_map, item_map
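

# Illustrative sketch (not part of the original pipeline): the zero-index
# remapping and user/timestamp sort described in the docstring above, shown on
# a tiny hand-built frame. The column names "user_id", "item_id" and
# "timestamp" are assumed to match the movielens column constants used in this
# module; the toy data itself is hypothetical.
def _example_filter_index_sort():
  """Toy demonstration of the zero-index remap and user/timestamp sort."""
  toy = pd.DataFrame({
      "user_id": [42, 7, 42],
      "item_id": [900, 901, 902],
      "timestamp": [3, 1, 2],
  })
  user_map = {u: i for i, u in enumerate(toy["user_id"].unique())}  # {42: 0, 7: 1}
  item_map = {m: i for i, m in enumerate(toy["item_id"].unique())}  # {900: 0, 901: 1, 902: 2}
  toy["user_id"] = toy["user_id"].map(user_map)
  toy["item_id"] = toy["item_id"].map(item_map)
  toy = toy.sort_values(["user_id", "timestamp"]).reset_index(drop=True)
  # After sorting, the last row of each user's slice is that user's most recent
  # interaction, which is what the evaluation split relies on.
  return toy, user_map, item_map
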
def _train_eval_map_fn(args):
"""Split training and testing data and generate testing negatives.
This function is called as part of a multiprocessing map. The principal
input is a shard, which contains a sorted array of users and corresponding
items for each user, where items have already been sorted in ascending order
by timestamp. (Timestamp is not passed to avoid the serialization cost of
sending it to the map function.)
For each user, all but the last item is written into a pickle file which the
training data producer can consume as needed. The last item for a user
is a validation point; it is written under a separate key and will be used
later to generate the evaluation data.
Args:
shard: A dict containing the user and item arrays.
shard_id: The id of the shard provided. This is used to number the training
shard pickle files.
num_items: The cardinality of the item set, which determines the set from
which validation negatives should be drawn.
cache_paths: rconst.Paths object containing locations for various cache
files.
"""
shard, shard_id, num_items, cache_paths = args
users = shard[movielens.USER_COLUMN]
items = shard[movielens.ITEM_COLUMN]
# This produces index boundaries which can be used to slice by user.
delta = users[1:] - users[:-1]
boundaries = ([0] + (np.argwhere(delta)[:, 0] + 1).tolist() +
[users.shape[0]])
train_blocks = []
test_positives = []
for i in range(len(boundaries) - 1):
# This is simply a vector of repeated values such that the shard could be
# represented compactly with a tuple of tuples:
# ((user_id, items), (user_id, items), ...)
# rather than:
# user_id_vector, item_id_vector
# However the additional nested structure significantly increases the
# serialization and deserialization cost such that it is not worthwhile.
block_user = users[boundaries[i]:boundaries[i+1]]
assert len(set(block_user)) == 1
block_items = items[boundaries[i]:boundaries[i+1]]
train_blocks.append((block_user[:-1], block_items[:-1]))
test_positives.append((block_user[0], block_items[-1]))
train_users = np.concatenate([i[0] for i in train_blocks])
train_items = np.concatenate([i[1] for i in train_blocks])
test_pos_users = np.array([i[0] for i in test_positives],
dtype=train_users.dtype)
test_pos_items = np.array([i[1] for i in test_positives],
dtype=train_items.dtype)
train_shard_fpath = cache_paths.train_shard_template.format(
str(shard_id).zfill(5))
with tf.gfile.Open(train_shard_fpath, "wb") as f:
pickle.dump({
rconst.TRAIN_KEY: {
movielens.USER_COLUMN: train_users,
movielens.ITEM_COLUMN: train_items,
},
rconst.EVAL_KEY: {
movielens.USER_COLUMN: test_pos_users,
movielens.ITEM_COLUMN: test_pos_items,
}
}, f)
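

# Minimal sketch (illustration only) of the boundary computation used in
# _train_eval_map_fn above: given a user vector that is already sorted, the
# nonzero entries of its first difference mark where a new user starts.
def _example_user_boundaries():
  users = np.array([0, 0, 0, 1, 1, 2])
  delta = users[1:] - users[:-1]  # [0, 0, 1, 0, 1]
  boundaries = ([0] + (np.argwhere(delta)[:, 0] + 1).tolist() + [users.shape[0]])
  # boundaries == [0, 3, 5, 6]; users[0:3], users[3:5] and users[5:6] are the
  # per-user slices.
  return boundaries
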
def generate_train_eval_data(df, approx_num_shards, num_items, cache_paths,
match_mlperf):
# type: (pd.DataFrame, int, int, rconst.Paths, bool) -> None
"""Construct training and evaluation datasets.
This function manages dataset construction and validation that the
transformations have produced correct results. The particular logic of
transforming the data is performed in _train_eval_map_fn().
Args:
df: The dataframe containing the entire dataset. It is essential that this
dataframe be produced by _filter_index_sort(), as subsequent
transformations rely on `df` having particular structure.
approx_num_shards: The approximate number of similarly sized shards to
construct from `df`. The MovieLens dataset has severe imbalances where some users
have interacted with many items; this is common among datasets involving
user data. Rather than attempt to aggressively balance shard size, this
function simply allows shards to "overflow" which can produce a number of
shards which is less than `approx_num_shards`. This small degree of
imbalance does not impact performance; however it does mean that one
should not expect approx_num_shards to be the ACTUAL number of shards.
num_items: The cardinality of the item set.
cache_paths: rconst.Paths object containing locations for various cache
files.
match_mlperf: If True, sample eval negatives with replacement, as the
MLPerf reference implementation does.
"""
num_rows = len(df)
approximate_partitions = np.linspace(
0, num_rows, approx_num_shards + 1).astype("int")
start_ind, end_ind = 0, 0
shards = []
for i in range(1, approx_num_shards + 1):
end_ind = approximate_partitions[i]
while (end_ind < num_rows and df[movielens.USER_COLUMN][end_ind - 1] ==
df[movielens.USER_COLUMN][end_ind]):
end_ind += 1
if end_ind <= start_ind:
continue # imbalance from prior shard.
df_shard = df[start_ind:end_ind]
user_shard = df_shard[movielens.USER_COLUMN].values.astype(np.int32)
item_shard = df_shard[movielens.ITEM_COLUMN].values.astype(np.uint16)
shards.append({
movielens.USER_COLUMN: user_shard,
movielens.ITEM_COLUMN: item_shard,
})
start_ind = end_ind
assert end_ind == num_rows
approx_num_shards = len(shards)
tf.logging.info("Splitting train and test data and generating {} test "
"negatives per user...".format(rconst.NUM_EVAL_NEGATIVES))
tf.gfile.MakeDirs(cache_paths.train_shard_subdir)
map_args = [(shards[i], i, num_items, cache_paths)
for i in range(approx_num_shards)]
with popen_helper.get_pool(multiprocessing.cpu_count()) as pool:
pool.map(_train_eval_map_fn, map_args) # pylint: disable=no-member
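

# Hedged sketch of the shard-boundary adjustment performed above: a provisional
# cut point from np.linspace is pushed forward until it no longer splits one
# user's rows across two shards. The toy data is hypothetical.
def _example_extend_shard_boundary():
  users = [0, 0, 1, 1, 1, 2, 2]  # sorted user column
  end_ind = 3                    # provisional cut from np.linspace
  while (end_ind < len(users) and users[end_ind - 1] == users[end_ind]):
    end_ind += 1
  return end_ind  # 5: the cut now lands on the user 1 -> user 2 transition
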
def construct_cache(dataset, data_dir, num_data_readers, match_mlperf,
deterministic, cache_id=None):
# type: (str, str, int, bool, bool, typing.Optional[int]) -> NCFDataset
"""Load and digest data CSV into a usable form.
Args:
dataset: The name of the dataset to be used.
data_dir: The root directory of the dataset.
num_data_readers: The number of parallel processes which will request
data during training.
match_mlperf: If True, change the behavior of the cache construction to
match the MLPerf reference implementation.
deterministic: Try to enforce repeatable behavior, even at the cost of
performance.
cache_id: Optional identifier passed to rconst.Paths to namespace the cache
files for this run.
Returns:
An NCFDataset describing the constructed cache.
"""
cache_paths = rconst.Paths(data_dir=data_dir, cache_id=cache_id)
num_data_readers = (num_data_readers or int(multiprocessing.cpu_count() / 2)
or 1)
approx_num_shards = int(movielens.NUM_RATINGS[dataset]
// rconst.APPROX_PTS_PER_TRAIN_SHARD) or 1
st = timeit.default_timer()
cache_root = os.path.join(data_dir, cache_paths.cache_root)
if tf.gfile.Exists(cache_root):
raise ValueError("{} unexpectedly already exists."
.format(cache_paths.cache_root))
tf.logging.info("Creating cache directory. This should be deleted on exit.")
tf.gfile.MakeDirs(cache_paths.cache_root)
raw_rating_path = os.path.join(data_dir, dataset, movielens.RATINGS_FILE)
df, user_map, item_map = _filter_index_sort(raw_rating_path, match_mlperf)
num_users, num_items = DATASET_TO_NUM_USERS_AND_ITEMS[dataset]
if num_users != len(user_map):
raise ValueError("Expected to find {} users, but found {}".format(
num_users, len(user_map)))
if num_items != len(item_map):
raise ValueError("Expected to find {} items, but found {}".format(
num_items, len(item_map)))
generate_train_eval_data(df=df, approx_num_shards=approx_num_shards,
num_items=len(item_map), cache_paths=cache_paths,
match_mlperf=match_mlperf)
del approx_num_shards # value may have changed.
ncf_dataset = NCFDataset(user_map=user_map, item_map=item_map,
num_data_readers=num_data_readers,
cache_paths=cache_paths,
num_train_positives=len(df) - len(user_map),
deterministic=deterministic)
run_time = timeit.default_timer() - st
tf.logging.info("Cache construction complete. Time: {:.1f} sec."
.format(run_time))
return ncf_dataset
def _shutdown(proc):
# type: (subprocess.Popen) -> None
"""Convenience function to cleanly shut down async generation process."""
tf.logging.info("Shutting down train data creation subprocess.")
try:
try:
proc.send_signal(signal.SIGINT)
time.sleep(5)
if proc.poll() is not None:
tf.logging.info("Train data creation subprocess ended")
return # SIGINT was handled successfully within 5 seconds
except socket.error:
pass
    # Otherwise, allow another second of grace period and then force kill the process.
time.sleep(1)
proc.terminate()
tf.logging.info("Train data creation subprocess killed")
except: # pylint: disable=broad-except
tf.logging.error("Data generation subprocess could not be killed.")
def write_flagfile(flags_, ncf_dataset):
"""Write flagfile to begin async data generation."""
if ncf_dataset.deterministic:
flags_["seed"] = stat_utils.random_int32()
  # We write to a temp file and then atomically rename it to the final file,
  # because writing directly to the final file can cause the data generation
  # async process to read a partially written flagfile.
flagfile_temp = os.path.join(ncf_dataset.cache_paths.cache_root,
rconst.FLAGFILE_TEMP)
tf.logging.info("Preparing flagfile for async data generation in {} ..."
.format(flagfile_temp))
with tf.gfile.Open(flagfile_temp, "w") as f:
for k, v in six.iteritems(flags_):
f.write("--{}={}\n".format(k, v))
flagfile = os.path.join(ncf_dataset.cache_paths.cache_root, rconst.FLAGFILE)
tf.gfile.Rename(flagfile_temp, flagfile)
tf.logging.info(
"Wrote flagfile for async data generation in {}.".format(flagfile))
def instantiate_pipeline(dataset, data_dir, batch_size, eval_batch_size,
num_cycles, num_data_readers=None, num_neg=4,
epochs_per_cycle=1, match_mlperf=False,
deterministic=False, use_subprocess=True,
cache_id=None):
# type: (...) -> (NCFDataset, typing.Callable)
"""Preprocess data and start negative generation subprocess."""
tf.logging.info("Beginning data preprocessing.")
tf.gfile.MakeDirs(data_dir)
ncf_dataset = construct_cache(dataset=dataset, data_dir=data_dir,
num_data_readers=num_data_readers,
match_mlperf=match_mlperf,
deterministic=deterministic,
cache_id=cache_id)
# By limiting the number of workers we guarantee that the worker
# pool underlying the training generation doesn't starve other processes.
num_workers = int(multiprocessing.cpu_count() * 0.75) or 1
flags_ = {
"data_dir": data_dir,
"cache_id": ncf_dataset.cache_paths.cache_id,
"num_neg": num_neg,
"num_train_positives": ncf_dataset.num_train_positives,
"num_items": ncf_dataset.num_items,
"num_users": ncf_dataset.num_users,
"num_readers": ncf_dataset.num_data_readers,
"epochs_per_cycle": epochs_per_cycle,
"num_cycles": num_cycles,
"train_batch_size": batch_size,
"eval_batch_size": eval_batch_size,
"num_workers": num_workers,
"redirect_logs": use_subprocess,
"use_tf_logging": not use_subprocess,
"ml_perf": match_mlperf,
"output_ml_perf_compliance_logging": mlperf_helper.LOGGER.enabled,
}
if use_subprocess:
tf.logging.info("Creating training file subprocess.")
subproc_env = os.environ.copy()
# The subprocess uses TensorFlow for tf.gfile, but it does not need GPU
# resources and by default will try to allocate GPU memory. This would cause
# contention with the main training process.
subproc_env["CUDA_VISIBLE_DEVICES"] = ""
subproc_args = popen_helper.INVOCATION + [
"--data_dir", data_dir,
"--cache_id", str(ncf_dataset.cache_paths.cache_id)]
tf.logging.info(
"Generation subprocess command: {}".format(" ".join(subproc_args)))
proc = subprocess.Popen(args=subproc_args, shell=False, env=subproc_env)
cleanup_called = {"finished": False}
@atexit.register
def cleanup():
"""Remove files and subprocess from data generation."""
if cleanup_called["finished"]:
return
if use_subprocess:
_shutdown(proc)
try:
tf.gfile.DeleteRecursively(ncf_dataset.cache_paths.cache_root)
except tf.errors.NotFoundError:
pass
cleanup_called["finished"] = True
for _ in range(300):
if tf.gfile.Exists(ncf_dataset.cache_paths.subproc_alive):
break
time.sleep(1) # allow `alive` file to be written
if not tf.gfile.Exists(ncf_dataset.cache_paths.subproc_alive):
raise ValueError("Generation subprocess did not start correctly. Data will "
"not be available; exiting to avoid waiting forever.")
# We start the async process and wait for it to signal that it is alive. It
# will then enter a loop waiting for the flagfile to be written. Once we see
# that the async process has signaled that it is alive, we clear the system
# caches and begin the run.
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.RUN_CLEAR_CACHES)
mlperf_helper.clear_system_caches()
mlperf_helper.ncf_print(key=mlperf_helper.TAGS.RUN_START)
write_flagfile(flags_, ncf_dataset)
return ncf_dataset, cleanup
def make_deserialize(params, batch_size, training=False):
"""Construct deserialize function for training and eval fns."""
feature_map = {
movielens.USER_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
movielens.ITEM_COLUMN: tf.FixedLenFeature([], dtype=tf.string),
}
if training:
feature_map["labels"] = tf.FixedLenFeature([], dtype=tf.string)
else:
feature_map[rconst.DUPLICATE_MASK] = tf.FixedLenFeature([], dtype=tf.string)
def deserialize(examples_serialized):
"""Called by Dataset.map() to convert batches of records to tensors."""
features = tf.parse_single_example(examples_serialized, feature_map)
users = tf.reshape(tf.decode_raw(
features[movielens.USER_COLUMN], tf.int32), (batch_size,))
items = tf.reshape(tf.decode_raw(
features[movielens.ITEM_COLUMN], tf.uint16), (batch_size,))
if params["use_tpu"] or params["use_xla_for_gpu"]:
      items = tf.cast(items, tf.int32)  # TPU and XLA disallow uint16 infeed.
if not training:
dupe_mask = tf.reshape(tf.cast(tf.decode_raw(
features[rconst.DUPLICATE_MASK], tf.int8), tf.bool), (batch_size,))
return {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
rconst.DUPLICATE_MASK: dupe_mask,
}
labels = tf.reshape(tf.cast(tf.decode_raw(
features["labels"], tf.int8), tf.bool), (batch_size,))
return {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
}, labels
return deserialize
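

# Sketch of the byte-level round trip that make_deserialize relies on: the
# producer stores whole batches as raw bytes, and tf.decode_raw recovers the
# typed vectors. numpy is used here so the example stands alone; the values are
# hypothetical.
def _example_raw_roundtrip():
  users = np.array([3, 1, 4, 1], dtype=np.int32)
  items = np.array([5, 9, 2, 6], dtype=np.uint16)
  user_bytes, item_bytes = users.tobytes(), items.tobytes()
  # Decoding the raw bytes with the matching dtype restores the original batch.
  assert np.array_equal(np.frombuffer(user_bytes, dtype=np.int32), users)
  assert np.array_equal(np.frombuffer(item_bytes, dtype=np.uint16), items)
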
def hash_pipeline(dataset, deterministic):
# type: (tf.data.Dataset, bool) -> None
"""Utility function for detecting non-determinism in the data pipeline.
Args:
dataset: a tf.data.Dataset generated by the input_fn
deterministic: Does the input_fn expect the dataset to be deterministic.
(i.e. fixed seed, sloppy=False, etc.)
"""
if not deterministic:
tf.logging.warning("Data pipeline is not marked as deterministic. Hash "
"values are not expected to be meaningful.")
batch = dataset.make_one_shot_iterator().get_next()
md5 = hashlib.md5()
count = 0
first_batch_hash = b""
with tf.Session() as sess:
while True:
try:
result = sess.run(batch)
if isinstance(result, tuple):
result = result[0] # only hash features
except tf.errors.OutOfRangeError:
break
count += 1
md5.update(memoryview(result[movielens.USER_COLUMN]).tobytes())
md5.update(memoryview(result[movielens.ITEM_COLUMN]).tobytes())
if count == 1:
first_batch_hash = md5.hexdigest()
overall_hash = md5.hexdigest()
tf.logging.info("Batch count: {}".format(count))
tf.logging.info(" [pipeline_hash] First batch hash: {}".format(
first_batch_hash))
tf.logging.info(" [pipeline_hash] All batches hash: {}".format(overall_hash))
def make_input_fn(
ncf_dataset, # type: typing.Optional[NCFDataset]
is_training, # type: bool
record_files=None # type: typing.Optional[tf.Tensor]
):
# type: (...) -> (typing.Callable, str, int)
"""Construct training input_fn for the current epoch."""
if ncf_dataset is None:
return make_synthetic_input_fn(is_training)
if record_files is not None:
epoch_metadata = None
batch_count = None
record_dir = None
else:
epoch_metadata, record_dir, template = get_epoch_info(is_training,
ncf_dataset)
record_files = os.path.join(record_dir, template.format("*"))
# This value is used to check that the batch count from the subprocess
# matches the batch count expected by the main thread.
batch_count = epoch_metadata["batch_count"]
def input_fn(params):
"""Generated input_fn for the given epoch."""
if is_training:
batch_size = params["batch_size"]
else:
      # Estimator has "eval_batch_size" included in the params, but TPUEstimator
      # populates "batch_size" with the appropriate value.
batch_size = params.get("eval_batch_size") or params["batch_size"]
if epoch_metadata and epoch_metadata["batch_size"] != batch_size:
raise ValueError(
"Records were constructed with batch size {}, but input_fn was given "
"a batch size of {}. This will result in a deserialization error in "
"tf.parse_single_example."
.format(epoch_metadata["batch_size"], batch_size))
record_files_ds = tf.data.Dataset.list_files(record_files, shuffle=False)
interleave = tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
cycle_length=4,
block_length=100000,
sloppy=not ncf_dataset.deterministic,
prefetch_input_elements=4,
)
deserialize = make_deserialize(params, batch_size, is_training)
dataset = record_files_ds.apply(interleave)
dataset = dataset.map(deserialize, num_parallel_calls=4)
dataset = dataset.prefetch(32)
if params.get("hash_pipeline"):
hash_pipeline(dataset, ncf_dataset.deterministic)
return dataset
return input_fn, record_dir, batch_count
def _check_subprocess_alive(ncf_dataset, directory):
if (not tf.gfile.Exists(ncf_dataset.cache_paths.subproc_alive) and
not tf.gfile.Exists(directory)):
# The generation subprocess must have been alive at some point, because we
# earlier checked that the subproc_alive file existed.
raise ValueError("Generation subprocess unexpectedly died. Data will not "
"be available; exiting to avoid waiting forever.")
def get_epoch_info(is_training, ncf_dataset):
"""Wait for the epoch input data to be ready and return various info about it.
Args:
is_training: If we should return info for a training or eval epoch.
ncf_dataset: An NCFDataset.
Returns:
epoch_metadata: A dict with epoch metadata.
record_dir: The directory with the TFRecord files storing the input data.
template: A string template of the files in `record_dir`.
`template.format('*')` is a glob that matches all the record files.
"""
if is_training:
train_epoch_dir = ncf_dataset.cache_paths.train_epoch_dir
_check_subprocess_alive(ncf_dataset, train_epoch_dir)
while not tf.gfile.Exists(train_epoch_dir):
tf.logging.info("Waiting for {} to exist.".format(train_epoch_dir))
time.sleep(1)
train_data_dirs = tf.gfile.ListDirectory(train_epoch_dir)
while not train_data_dirs:
tf.logging.info("Waiting for data folder to be created.")
time.sleep(1)
train_data_dirs = tf.gfile.ListDirectory(train_epoch_dir)
train_data_dirs.sort() # names are zfilled so that
# lexicographic sort == numeric sort
record_dir = os.path.join(train_epoch_dir, train_data_dirs[0])
template = rconst.TRAIN_RECORD_TEMPLATE
else:
record_dir = ncf_dataset.cache_paths.eval_data_subdir
_check_subprocess_alive(ncf_dataset, record_dir)
template = rconst.EVAL_RECORD_TEMPLATE
ready_file = os.path.join(record_dir, rconst.READY_FILE)
while not tf.gfile.Exists(ready_file):
tf.logging.info("Waiting for records in {} to be ready".format(record_dir))
time.sleep(1)
with tf.gfile.Open(ready_file, "r") as f:
epoch_metadata = json.load(f)
return epoch_metadata, record_dir, template
def make_synthetic_input_fn(is_training):
"""Construct training input_fn that uses synthetic data."""
def input_fn(params):
"""Generated input_fn for the given epoch."""
batch_size = (params["batch_size"] if is_training else
params["eval_batch_size"] or params["batch_size"])
num_users = params["num_users"]
num_items = params["num_items"]
users = tf.random_uniform([batch_size], dtype=tf.int32, minval=0,
maxval=num_users)
items = tf.random_uniform([batch_size], dtype=tf.int32, minval=0,
maxval=num_items)
if is_training:
labels = tf.random_uniform([batch_size], dtype=tf.int32, minval=0,
maxval=2)
data = {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
}, labels
else:
dupe_mask = tf.cast(tf.random_uniform([batch_size], dtype=tf.int32,
minval=0, maxval=2), tf.bool)
data = {
movielens.USER_COLUMN: users,
movielens.ITEM_COLUMN: items,
rconst.DUPLICATE_MASK: dupe_mask,
}
dataset = tf.data.Dataset.from_tensors(data).repeat(
SYNTHETIC_BATCHES_PER_EPOCH)
dataset = dataset.prefetch(32)
return dataset
return input_fn, None, SYNTHETIC_BATCHES_PER_EPOCH
| apache-2.0 |
vene/marseille | experiments/exp_svmstruct.py | 1 | 5705 | import os
import dill
import numpy as np
from sklearn.model_selection import KFold
from pystruct.learners import FrankWolfeSSVM
from marseille.argdoc import DocStructure
from marseille.custom_logging import logging
from marseille.datasets import get_dataset_loader
from marseille.io import cache_fname
from marseille.struct_models import ArgumentGraphCRF
from marseille.vectorize import (add_pmi_features, stats_train, prop_vectorizer,
link_vectorizer, second_order_vectorizer)
def _vectorize(doc, pmi_incoming, pmi_outgoing, prop_vect, link_vect,
second_order_vect=None):
for f in doc.features:
add_pmi_features(f, pmi_incoming, pmi_outgoing)
X_prop = prop_vect.transform(doc.prop_features)
X_link = link_vect.transform(doc.features)
if second_order_vect is not None:
n_sec_ord_features = len(second_order_vect.get_feature_names())
if doc.second_order_features:
X_sec_ord = second_order_vect.transform(doc.second_order_features)
else:
X_sec_ord = np.empty((0, n_sec_ord_features))
return DocStructure(doc, X_prop, X_link, X_sec_ord)
return DocStructure(doc, X_prop, X_link)
def fit_predict(train_docs, test_docs, dataset, C, class_weight, constraints,
compat_features, second_order, coparents, grandparents,
siblings, exact_test=False, return_vectorizers=False):
stats = stats_train(train_docs)
prop_vect, _ = prop_vectorizer(train_docs,
which=dataset,
stats=stats,
n_most_common_tok=None,
n_most_common_dep=2000,
return_transf=True)
link_vect = link_vectorizer(train_docs, stats, n_most_common=500)
sec_ord_vect = (second_order_vectorizer(train_docs)
if second_order else None)
_, _, _, pmi_in, pmi_out = stats
def _transform_x_y(docs):
X = [_vectorize(doc, pmi_in, pmi_out, prop_vect, link_vect,
sec_ord_vect)
for doc in docs]
Y = [doc.label for doc in docs]
return X, Y
X_tr, Y_tr = _transform_x_y(train_docs)
X_te, Y_te = _transform_x_y(test_docs)
model = ArgumentGraphCRF(class_weight=class_weight,
constraints=constraints,
compat_features=compat_features,
coparents=coparents,
grandparents=grandparents,
siblings=siblings)
clf = FrankWolfeSSVM(model, C=C, random_state=0, verbose=1,
check_dual_every=25,
show_loss_every=25,
max_iter=100,
tol=0)
clf.fit(X_tr, Y_tr)
if exact_test:
clf.model.exact = True
Y_pred = clf.predict(X_te)
if return_vectorizers:
vectorizers = (pmi_in, pmi_out, prop_vect, link_vect, sec_ord_vect)
return clf, Y_te, Y_pred, vectorizers
return clf, Y_te, Y_pred
def svmstruct_cv_score(dataset, C, class_weight, constraints,
compat_features, second_order_features):
fn = cache_fname("svmstruct_cv_score", (dataset, C, class_weight,
constraints, compat_features,
second_order_features))
if os.path.exists(fn):
logging.info("Cached file already exists.")
with open(fn, "rb") as f:
return dill.load(f)
load, ids = get_dataset_loader(dataset, split="train")
n_folds = 5 if dataset == 'ukp' else 3
# below are boolean logical ops
grandparents = second_order_features and dataset == 'ukp'
coparents = second_order_features
siblings = second_order_features and dataset == 'cdcp'
scores = []
all_Y_pred = []
for k, (tr, val) in enumerate(KFold(n_folds).split(ids)):
train_docs = list(load(ids[tr]))
val_docs = list(load(ids[val]))
clf, Y_val, Y_pred = fit_predict(train_docs, val_docs, dataset, C,
class_weight,
constraints, compat_features,
second_order_features, grandparents,
coparents, siblings)
all_Y_pred.extend(Y_pred)
scores.append(clf.model._score(Y_val, Y_pred))
with open(fn, "wb") as f:
dill.dump((scores, all_Y_pred), f)
return scores, all_Y_pred
def main():
from docopt import docopt
usage = """
Usage:
exp_svmstruct (cdcp|ukp) --C=N [--balanced --constraints --strict
--compat-features --second-order-features]
"""
args = docopt(usage)
C = float(args['--C'])
dataset = 'cdcp' if args['cdcp'] else 'ukp'
cw = 'balanced' if args['--balanced'] else None
if args['--constraints']:
constraints = dataset
if args['--strict']:
constraints += '+strict'
else:
constraints = ""
scores, _ = svmstruct_cv_score(dataset, C, cw, constraints,
args['--compat-features'],
args['--second-order-features'])
link_macro, link_micro, node_macro, node_micro, acc = np.mean(scores,
axis=0)
print("Link: {:.3f}/{:.3f} Node: {:.3f}/{:.3f} accuracy {:.3f}".format(
link_macro, link_micro, node_macro, node_micro, acc))
if __name__ == '__main__':
main()
| bsd-3-clause |
hitszxp/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
jkthompson/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/geo.py | 69 | 19738 | import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.artist import kwdocd
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
class ThetaFormatter(Formatter):
"""
Used to format the theta tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __init__(self, round_to=1.0):
self._round_to = round_to
def __call__(self, x, pos=None):
degrees = (x / np.pi) * 180.0
degrees = round(degrees / self._round_to) * self._round_to
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % degrees
else:
return u"%0.0f\u00b0" % degrees
RESOLUTION = 75
def cla(self):
Axes.cla(self)
self.set_longitude_grid(30)
self.set_latitude_grid(15)
self.set_longitude_grid_ends(75)
self.xaxis.set_minor_locator(NullLocator())
self.yaxis.set_minor_locator(NullLocator())
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.grid(rcParams['axes.grid'])
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
def _set_lim_and_transforms(self):
# A (possibly non-linear) projection on the (already scaled) data
self.transProjection = self._get_core_transform(self.RESOLUTION)
self.transAffine = self._get_affine_transform()
self.transAxes = BboxTransformTo(self.bbox)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = \
self.transProjection + \
self.transAffine + \
self.transAxes
# This is the transform for longitude ticks.
self._xaxis_pretransform = \
Affine2D() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
self._xaxis_transform = \
self._xaxis_pretransform + \
self.transData
self._xaxis_text1_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, 4.0)
self._xaxis_text2_transform = \
Affine2D().scale(1.0, 0.0) + \
self.transData + \
Affine2D().translate(0.0, -4.0)
# This is the transform for latitude ticks.
yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_stretch + \
self.transData
yaxis_text_base = \
yaxis_stretch + \
self.transProjection + \
(yaxis_space + \
self.transAffine + \
self.transAxes)
self._yaxis_text1_transform = \
yaxis_text_base + \
Affine2D().translate(-8.0, 0.0)
self._yaxis_text2_transform = \
yaxis_text_base + \
Affine2D().translate(8.0, 0.0)
def _get_affine_transform(self):
transform = self._get_core_transform(1)
xscale, _ = transform.transform_point((np.pi, 0))
_, yscale = transform.transform_point((0, np.pi / 2.0))
return Affine2D() \
.scale(0.5 / xscale, 0.5 / yscale) \
.translate(0.5, 0.5)
def get_xaxis_transform(self):
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'bottom', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'top', 'center'
def get_yaxis_transform(self):
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
return self._yaxis_text1_transform, 'center', 'right'
def get_yaxis_text2_transform(self, pad):
return self._yaxis_text2_transform, 'center', 'left'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def set_yscale(self, *args, **kwargs):
if args[0] != 'linear':
raise NotImplementedError
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
Axes.set_xlim(self, -np.pi, np.pi)
Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
set_ylim = set_xlim
def format_coord(self, long, lat):
'return a format string formatting the coordinate'
long = long * (180.0 / np.pi)
lat = lat * (180.0 / np.pi)
if lat >= 0.0:
ns = 'N'
else:
ns = 'S'
if long >= 0.0:
ew = 'E'
else:
ew = 'W'
return u'%f\u00b0%s, %f\u00b0%s' % (abs(lat), ns, abs(long), ew)
def set_longitude_grid(self, degrees):
"""
Set the number of degrees between each longitude grid.
"""
number = (360.0 / degrees) + 1
self.xaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi, np.pi, number, True)[1:-1]))
        self._longitude_degrees = degrees
self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
"""
        Set the number of degrees between each latitude grid.
"""
number = (180.0 / degrees) + 1
self.yaxis.set_major_locator(
FixedLocator(
np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
self._latitude_degrees = degrees
self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
"""
Set the latitude(s) at which to stop drawing the longitude grids.
"""
self._longitude_cap = degrees * (np.pi / 180.0)
self._xaxis_pretransform \
.clear() \
.scale(1.0, self._longitude_cap * 2.0) \
.translate(0.0, -self._longitude_cap)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself.
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return True if this axes support the zoom box
"""
return False
def start_pan(self, x, y, button):
pass
def end_pan(self):
pass
def drag_pan(self, button, key, x, y):
pass
class AitoffAxes(GeoAxes):
name = 'aitoff'
class AitoffTransform(Transform):
"""
The base Aitoff transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Aitoff transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Aitoff space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
alpha = np.arccos(cos_latitude * np.cos(half_long))
# Mask this array, or we'll get divide-by-zero errors
alpha = ma.masked_where(alpha == 0.0, alpha)
# We want unnormalized sinc. numpy.sinc gives us normalized
sinc_alpha = ma.sin(alpha) / alpha
x = (cos_latitude * np.sin(half_long)) / sinc_alpha
y = (np.sin(latitude) / sinc_alpha)
x.set_fill_value(0.0)
y.set_fill_value(0.0)
return np.concatenate((x.filled(), y.filled()), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return AitoffAxes.InvertedAitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedAitoffTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return AitoffAxes.AitoffTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
name = 'hammer'
class HammerTransform(Transform):
"""
The base Hammer transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Hammer transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Hammer space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
# Pre-compute some values
half_long = longitude / 2.0
cos_latitude = np.cos(latitude)
sqrt2 = np.sqrt(2.0)
alpha = 1.0 + cos_latitude * np.cos(half_long)
x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
y = (sqrt2 * np.sin(latitude)) / alpha
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return HammerAxes.InvertedHammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedHammerTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
quarter_x = 0.25 * x
half_y = 0.5 * y
z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
latitude = np.arcsin(y*z)
return np.concatenate((longitude, latitude), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return HammerAxes.HammerTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
name = 'mollweide'
class MollweideTransform(Transform):
"""
The base Mollweide transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
"""
Create a new Mollweide transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Mollweide space.
"""
Transform.__init__(self)
self._resolution = resolution
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
aux = 2.0 * np.arcsin((2.0 * latitude) / np.pi)
x = (2.0 * np.sqrt(2.0) * longitude * np.cos(aux)) / np.pi
y = (np.sqrt(2.0) * np.sin(aux))
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return MollweideAxes.InvertedMollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedMollweideTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, resolution):
Transform.__init__(self)
self._resolution = resolution
def transform(self, xy):
# MGDTODO: Math is hard ;(
return xy
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return MollweideAxes.MollweideTransform(self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect(0.5, adjustable='box', anchor='C')
self.cla()
def _get_core_transform(self, resolution):
return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
name = 'lambert'
class LambertTransform(Transform):
"""
The base Lambert transform.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
"""
Create a new Lambert transform. Resolution is the number of steps
to interpolate between each input line segment to approximate its
path in curved Lambert space.
"""
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, ll):
longitude = ll[:, 0:1]
latitude = ll[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
cos_lat = np.cos(latitude)
sin_lat = np.sin(latitude)
diff_long = longitude - clong
cos_diff_long = np.cos(diff_long)
inner_k = (1.0 +
np.sin(clat)*sin_lat +
np.cos(clat)*cos_lat*cos_diff_long)
# Prevent divide-by-zero problems
inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
k = np.sqrt(2.0 / inner_k)
x = k*cos_lat*np.sin(diff_long)
y = k*(np.cos(clat)*sin_lat -
np.sin(clat)*cos_lat*cos_diff_long)
return np.concatenate((x, y), 1)
transform.__doc__ = Transform.transform.__doc__
transform_non_affine = transform
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path(self, path):
vertices = path.vertices
ipath = path.interpolated(self._resolution)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path.__doc__ = Transform.transform_path.__doc__
transform_path_non_affine = transform_path
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return LambertAxes.InvertedLambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
class InvertedLambertTransform(Transform):
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, center_longitude, center_latitude, resolution):
Transform.__init__(self)
self._resolution = resolution
self._center_longitude = center_longitude
self._center_latitude = center_latitude
def transform(self, xy):
x = xy[:, 0:1]
y = xy[:, 1:2]
clong = self._center_longitude
clat = self._center_latitude
p = np.sqrt(x*x + y*y)
p = np.where(p == 0.0, 1e-9, p)
c = 2.0 * np.arcsin(0.5 * p)
sin_c = np.sin(c)
cos_c = np.cos(c)
lat = np.arcsin(cos_c*np.sin(clat) +
((y*sin_c*np.cos(clat)) / p))
long = clong + np.arctan(
(x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
return np.concatenate((long, lat), 1)
transform.__doc__ = Transform.transform.__doc__
def inverted(self):
return LambertAxes.LambertTransform(
self._center_longitude,
self._center_latitude,
self._resolution)
inverted.__doc__ = Transform.inverted.__doc__
def __init__(self, *args, **kwargs):
self._longitude_cap = np.pi / 2.0
self._center_longitude = kwargs.pop("center_longitude", 0.0)
self._center_latitude = kwargs.pop("center_latitude", 0.0)
GeoAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
def cla(self):
GeoAxes.cla(self)
self.yaxis.set_major_formatter(NullFormatter())
def _get_core_transform(self, resolution):
return self.LambertTransform(
self._center_longitude,
self._center_latitude,
resolution)
def _get_affine_transform(self):
return Affine2D() \
.scale(0.25) \
.translate(0.5, 0.5)
| gpl-3.0 |
google-research/proteinfer | baseline_utils_test.py | 1 | 13960 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
# pylint: disable=line-too-long
"""Tests for module model_performance_analysis.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
import baseline_utils
import test_util
import tensorflow.compat.v1 as tf
def _write_to_file(contents):
tmpfile_name = tempfile.mktemp()
with tf.io.gfile.GFile(tmpfile_name, "w") as f:
f.write(contents.encode("utf-8"))
return tmpfile_name
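

# Standalone sketch (not one of the test cases below) showing how the helper
# above pairs with baseline_utils.load_ground_truth: write a small FASTA-style
# string to disk, then parse it back into a dataframe. The accession, label and
# sequence values are hypothetical.
def _example_ground_truth_round_trip():
  fasta = ('>accession="P12345"\tlabels="GO:0003824"\n'
           "MKV\n")
  path = _write_to_file(fasta)
  ground_truth = baseline_utils.load_ground_truth(path)
  # ground_truth has one row with sequence_name "P12345", true_label
  # {"GO:0003824"} and sequence "MKV".
  return ground_truth
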
class BaselineUtilsTest(parameterized.TestCase):
@parameterized.named_parameters(
dict(
testcase_name="has fasta character >",
header='>accession="ACCESSION"\tlabels="label1,label2"',
expected="ACCESSION",
),
dict(
testcase_name="does not have character >",
header='accession="ACCESSION"\tlabels="label1,label2"',
expected="ACCESSION",
),
)
def test_get_sequence_name_from_sequence_header(self, header, expected):
actual = baseline_utils._get_sequence_name_from_sequence_header(header)
self.assertEqual(actual, expected)
@parameterized.named_parameters(
dict(
testcase_name="two labels",
header='>accession="ACCESSION"\tlabels="label1,label2"',
expected={"label1", "label2"},
),
dict(
testcase_name="zero labels",
header='>accession="ACCESSION"\tlabels=""',
expected=set(),
),
)
def test_get_labels_from_sequence_header(self, header, expected):
actual = baseline_utils._get_labels_from_sequence_header(header)
self.assertEqual(actual, expected)
def test_load_ground_truth(self):
input_fasta = ('>accession="ACCESSION"\tlabels="GO:101010,EC:9.9.9.9"\n'
"ADE\n"
'>accession="ACCESSION2"\tlabels="EC:1.2.-.-"\n'
"WWWW\n")
tmpfile_name = _write_to_file(input_fasta)
actual = baseline_utils.load_ground_truth(tmpfile_name)
expected = pd.DataFrame({
"sequence_name": ["ACCESSION", "ACCESSION2"],
"true_label": [{"GO:101010", "EC:9.9.9.9"}, {"EC:1.2.-.-"}],
"sequence": ["ADE", "WWWW"]
})
test_util.assert_dataframes_equal(self, actual, expected)
@parameterized.named_parameters(
dict(
testcase_name="no inputs, one thing in vocab",
input_row=pd.Series({
"predicted_label": frozenset([]),
"bit_score": 99.
}),
input_label_vocab=np.array(["PF00001"]),
expected=[0.],
),
dict(
testcase_name="one input, one thing in vocab",
input_row=pd.Series({
"predicted_label": frozenset(["PF00001"]),
"bit_score": 99.
}),
input_label_vocab=np.array(["PF00001"]),
expected=[99.],
),
dict(
testcase_name="one input, two things in vocab",
input_row=pd.Series({
"predicted_label": frozenset(["PF00001"]),
"bit_score": 99.
}),
input_label_vocab=np.array(["PF00001", "PF99999"]),
expected=[99., 0.],
),
dict(
testcase_name="two inputs, two things in vocab",
input_row=pd.Series({
"predicted_label": frozenset(["PF00001", "PF99999"]),
"bit_score": 99.
}),
input_label_vocab=np.array(["PF00001", "PF99999"]),
expected=[99., 99.],
),
)
def test_blast_row_to_confidence_array(self, input_row, input_label_vocab,
expected):
lookup = {k: i for i, k in enumerate(input_label_vocab)}
actual = baseline_utils._blast_row_to_confidence_array(
input_row, input_label_vocab, lookup)
np.testing.assert_allclose(actual, expected)
def test_load_blast_output(self):
input_test_fasta = (
'>accession="ACCESSION"\tlabels="GO:101010,EC:9.9.9.9"\n'
"ADE\n"
'>accession="ACCESSION2"\tlabels="EC:1.2.-.-"\n'
"WWWW\n")
test_fasta_filename = _write_to_file(input_test_fasta)
ground_truth_test = baseline_utils.load_ground_truth(test_fasta_filename)
input_train_fasta = (
'>accession="MATCHACCESSION"\tlabels="GO:101010,EC:9.9.9.9,Pfam:PF12345"\n'
"ADE\n")
train_fasta_filename = _write_to_file(input_train_fasta)
ground_truth_train = baseline_utils.load_ground_truth(train_fasta_filename)
# Missing second sequence in ground truth.
input_blast = (
'accession="ACCESSION"\taccession="MATCHACCESSION"\t82.456\t57\t10\t0\t1\t57\t1\t57\t6.92e-21\t79.3\n'
)
input_label_vocab = np.array(
["EC:1.2.-.-", "EC:9.9.9.9", "GO:101010", "Pfam:PF12345"])
blast_filename = _write_to_file(input_blast)
actual = baseline_utils.load_blast_output(
filename=blast_filename,
label_vocab=input_label_vocab,
test_data_ground_truth=ground_truth_test,
training_data_ground_truth=ground_truth_train)
expected = pd.DataFrame({
"sequence_name": ["ACCESSION", "ACCESSION2"],
"closest_sequence": ["MATCHACCESSION", float("nan")],
"true_label": [{"GO:101010", "EC:9.9.9.9"}, {"EC:1.2.-.-"}],
"predicted_label": [{"GO:101010", "EC:9.9.9.9", "Pfam:PF12345"},
frozenset()],
"percent_seq_identity": [82.456, float("nan")],
"e_value": [6.92e-21, float("nan")],
"bit_score": [79.3, 0.0],
})
test_util.assert_dataframes_equal(
self,
# Assert dataframes equal except for predictions column.
# Rely on unit testing for predictions column instead to increase
# test clarity. See test_blast_row_to_confidence_array above.
actual.drop(columns=["predictions"]),
expected,
nan_equals_nan=True)
def test_limit_set_of_labels(self):
# Set up input data.
input_df = pd.DataFrame(
{"labels": [frozenset(["a"]), frozenset(["a", "b"])]})
acceptable_labels = frozenset(["a"])
column_to_limit = "labels"
# Assert input dataframe was not modified later on, so save a copy.
input_df_copy = input_df.copy()
# Compute actual.
actual = baseline_utils.limit_set_of_labels(input_df, acceptable_labels,
column_to_limit)
expected = pd.DataFrame({"labels": [frozenset(["a"]), frozenset(["a"])]})
# Test assertions.
test_util.assert_dataframes_equal(self, actual, expected)
# Assert input dataframe was not modified.
test_util.assert_dataframes_equal(self, input_df, input_df_copy)
def test_limit_labels_for_label_normalizer(self):
input_label_normalizer = {
"a": ["a", "b", "c"],
"DDDD": ["XXXX"],
"b": ["YYYY", "b"]
}
input_acceptable_labels = frozenset(["a", "b"])
actual = baseline_utils.limit_labels_for_label_normalizer(
input_label_normalizer, input_acceptable_labels)
expected = {"a": ["a", "b"], "b": ["b"]}
self.assertDictEqual(actual, expected)
@parameterized.named_parameters(
dict(
testcase_name="one sequence, one label row, no extra nonlabel entries",
interproscan_output="""accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 TIGRFAM TIGR00506 ribB: 3,4-dihydroxy-2-butanone-4-phosphate synthase 13 209 1.5E-86 T 21-10-2019 IPR000422 3,4-dihydroxy-2-butanone 4-phosphate synthase, RibB GO:0008686|GO:0009231""",
input_ground_truth_test_fasta=""">accession="B7UIV5"\tlabels="GO:101010"
NOT_USED""",
expected_predicted_labels_per_seq={
"B7UIV5": {"GO:0008686", "GO:0009231"}
},
),
dict(
testcase_name="one sequence, one label row, extra nonlabel entries",
interproscan_output="""accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 TIGRFAM TIGR00506 ribB: 3,4-dihydroxy-2-butanone-4-phosphate synthase 13 209 1.5E-86 T 21-10-2019 IPR000422 3,4-dihydroxy-2-butanone 4-phosphate synthase, RibB GO:0008686|GO:0009231
accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 Gene3D G3DSA:3.90.870.10 1 217 5.1E-95T21-10-2019""",
input_ground_truth_test_fasta=""">accession="B7UIV5"\tlabels="GO:101010"
NOT_USED""",
expected_predicted_labels_per_seq={"B7UIV5": {"GO:0008686", "GO:0009231"}},
),
dict(
testcase_name="one sequence, no labels",
interproscan_output="""accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 Gene3D G3DSA:3.90.870.10 1 217 5.1E-95T21-10-2019""",
input_ground_truth_test_fasta=""">accession="B7UIV5"\tlabels="GO:101010"
NOT_USED""",
expected_predicted_labels_per_seq={"B7UIV5": set()},
),
dict(
testcase_name="one sequence, multiple label rows, extra nonlabel entries",
interproscan_output="""accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 PANTHER PTHR21327:SF38 1 217 8.2E-126 T21-10-2019
accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 TIGRFAM TIGR00506 ribB: 3,4-dihydroxy-2-butanone-4-phosphate synthase 13 209 1.5E-86 T 21-10-2019 IPR000422 3,4-dihydroxy-2-butanone 4-phosphate synthase, RibB GO:0008686|GO:0009231
accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 Hamap MF_00180 3,4-dihydroxy-2-butanone 4-phosphate synthase [ribB]. 11 213 43.238 T 21-10-2019 IPR000422 3,4-dihydroxy-2-butanone 4-phosphate synthase, RibB GO:0008686|GO:0009231
accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 Gene3D G3DSA:3.90.870.10 1 217 5.1E-95T21-10-2019
accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 SUPERFAMILY SSF55821 7 213 5.95E-86T 21-10-2019 IPR017945 DHBP synthase RibB-like alpha/beta domain superfamily
accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 Pfam PF00926 3,4-dihydroxy-2-butanone 4-phosphate synthase 17 208 1.7E-82 T 21-10-2019 IPR000422 3,4-dihydroxy-2-butanone 4-phosphate synthase, RibB GO:0008686|GO:0009231
accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 PANTHER PTHR21327 1 217 8.2E-126 T21-10-2019""",
input_ground_truth_test_fasta=""">accession="B7UIV5"\tlabels="GO:101010"
NOT_USED""",
expected_predicted_labels_per_seq={"B7UIV5": {"GO:0008686", "GO:0009231"}},
),
dict(
testcase_name="two sequences, one has labels",
interproscan_output="""accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 PANTHER PTHR21327 1 217 8.2E-126 T21-10-2019
accession="Q5SMK6" e9a286a263b71156fcf0cfebc12caec6 360 CDD cd00143 PP2Cc 64 325 6.91138E-87 T 21-10-2019 IPR001932 PPM-type phosphatase domain GO:0003824""",
input_ground_truth_test_fasta=""">accession="B7UIV5"\tlabels="GO:101010"
NOT_USED
>accession="Q5SMK6"\tlabels="GO:101010"
NOT_USED""",
expected_predicted_labels_per_seq={
"B7UIV5": set(),
"Q5SMK6": {"GO:0003824"},
},
),
dict(
testcase_name="two sequences, both have labels",
interproscan_output="""accession="B7UIV5" 74c763abf8567dfb6f4f83a4e0a31454 217 TIGRFAM TIGR00506 ribB: 3,4-dihydroxy-2-butanone-4-phosphate synthase 13 209 1.5E-86 T 21-10-2019 IPR000422 3,4-dihydroxy-2-butanone 4-phosphate synthase, RibB GO:0008686|GO:0009231
accession="Q5SMK6" e9a286a263b71156fcf0cfebc12caec6 360 CDD cd00143 PP2Cc 64 325 6.91138E-87 T 21-10-2019 IPR001932 PPM-type phosphatase domain GO:0003824""",
input_ground_truth_test_fasta=""">accession="B7UIV5"\tlabels="GO:101010"
NOT_USED
>accession="Q5SMK6"\tlabels="GO:101010"
NOT_USED""",
expected_predicted_labels_per_seq={
"B7UIV5": {"GO:0008686", "GO:0009231"},
"Q5SMK6": {"GO:0003824"},
},
),
dict(
testcase_name="sequence in ground truth that is missing in interproscan output",
interproscan_output="",
input_ground_truth_test_fasta=""">accession="B7UIV5"\tlabels="GO:101010"
NOT_USED""",
expected_predicted_labels_per_seq={
"B7UIV5": set(),
},
),
)
def test_load_interproscan_output(self, interproscan_output,
input_ground_truth_test_fasta,
expected_predicted_labels_per_seq):
# Set up inputs.
input_file = _write_to_file(interproscan_output)
input_test_fasta_filename = _write_to_file(input_ground_truth_test_fasta)
input_ground_truth_test = baseline_utils.load_ground_truth(
input_test_fasta_filename)
# Compute actual results.
actual_interproscan_output = baseline_utils.load_interproscan_output(
test_data_ground_truth=input_ground_truth_test,
interproscan_output_filename=input_file)
# Assertions.
expected_df_length = len(
set(
list(expected_predicted_labels_per_seq.keys()) +
input_ground_truth_test.sequence_name.values))
self.assertLen(actual_interproscan_output, expected_df_length)
for row in actual_interproscan_output.itertuples():
self.assertIn(row.sequence_name, expected_predicted_labels_per_seq)
self.assertSetEqual(row.predicted_label,
expected_predicted_labels_per_seq[row.sequence_name])
if __name__ == "__main__":
absltest.main()
| apache-2.0 |
selva86/python-machine-learning | final_project/tester.py | 1 | 4212 | #!/usr/bin/pickle
""" a basic script for importing student's POI identifier,
and checking the results that they get from it
requires that the algorithm, dataset, and features list
be written to my_classifier.pkl, my_dataset.pkl, and
my_feature_list.pkl, respectively
that process should happen at the end of poi_id.py
"""
import pickle
import sys
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.cross_validation import StratifiedShuffleSplit
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
PERF_FORMAT_STRING = "\
\tAccuracy: {:>0.{display_precision}f}\tPrecision: {:>0.{display_precision}f}\t\
Recall: {:>0.{display_precision}f}\tF1: {:>0.{display_precision}f}\tF2: {:>0.{display_precision}f}"
RESULTS_FORMAT_STRING = "\tTotal predictions: {:4d}\tTrue positives: {:4d}\tFalse positives: {:4d}\tFalse negatives: {:4d}\tTrue negatives: {:4d}"
def test_classifier(clf, dataset, feature_list, folds = 1000):
data = featureFormat(dataset, feature_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
cv = StratifiedShuffleSplit(labels, folds, random_state = 42)
true_negatives = 0
false_negatives = 0
true_positives = 0
false_positives = 0
for train_idx, test_idx in cv:
features_train = []
features_test = []
labels_train = []
labels_test = []
for ii in train_idx:
features_train.append( features[ii] )
labels_train.append( labels[ii] )
for jj in test_idx:
features_test.append( features[jj] )
labels_test.append( labels[jj] )
### fit the classifier using training set, and test on test set
clf.fit(features_train, labels_train)
predictions = clf.predict(features_test)
for prediction, truth in zip(predictions, labels_test):
if prediction == 0 and truth == 0:
true_negatives += 1
elif prediction == 0 and truth == 1:
false_negatives += 1
elif prediction == 1 and truth == 0:
false_positives += 1
elif prediction == 1 and truth == 1:
true_positives += 1
else:
print "Warning: Found a predicted label not == 0 or 1."
print "All predictions should take value 0 or 1."
print "Evaluating performance for processed predictions:"
break
try:
total_predictions = true_negatives + false_negatives + false_positives + true_positives
accuracy = 1.0*(true_positives + true_negatives)/total_predictions
precision = 1.0*true_positives/(true_positives+false_positives)
recall = 1.0*true_positives/(true_positives+false_negatives)
f1 = 2.0 * true_positives/(2*true_positives + false_positives+false_negatives)
f2 = (1+2.0*2.0) * precision*recall/(4*precision + recall)
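        # f2 above is the F-beta score with beta = 2, weighting recall more
        # heavily than precision.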
print clf
print PERF_FORMAT_STRING.format(accuracy, precision, recall, f1, f2, display_precision = 5)
print RESULTS_FORMAT_STRING.format(total_predictions, true_positives, false_positives, false_negatives, true_negatives)
print ""
except:
print "Got a divide by zero when trying out:", clf
CLF_PICKLE_FILENAME = "my_classifier.pkl"
DATASET_PICKLE_FILENAME = "my_dataset.pkl"
FEATURE_LIST_FILENAME = "my_feature_list.pkl"
def dump_classifier_and_data(clf, dataset, feature_list):
pickle.dump(clf, open(CLF_PICKLE_FILENAME, "w") )
pickle.dump(dataset, open(DATASET_PICKLE_FILENAME, "w") )
pickle.dump(feature_list, open(FEATURE_LIST_FILENAME, "w") )
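# Illustrative usage sketch (not part of the original script): at the end of
# poi_id.py the student would typically call
#
#     from tester import dump_classifier_and_data
#     dump_classifier_and_data(clf, my_dataset, features_list)
#
# where clf, my_dataset and features_list are placeholder names for the
# classifier, the data dictionary and the feature list built in poi_id.py.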
def load_classifier_and_data():
clf = pickle.load(open(CLF_PICKLE_FILENAME, "r") )
dataset = pickle.load(open(DATASET_PICKLE_FILENAME, "r") )
feature_list = pickle.load(open(FEATURE_LIST_FILENAME, "r"))
return clf, dataset, feature_list
def main():
### load up student's classifier, dataset, and feature_list
clf, dataset, feature_list = load_classifier_and_data()
### Run testing script
test_classifier(clf, dataset, feature_list)
if __name__ == '__main__':
main()
| mit |
shikhardb/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
annahs/atmos_research | WHI_long_term_make_SP2_GC_comparison_table.py | 1 | 8928 | import matplotlib.pyplot as plt
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import pickle
import math
import calendar
from math import log10, floor
case = 'default' #default, Van, wet_scav, no_bb, all_together
RH_of_interest = 90 #101 = no threshold
sig_figs_SP2 = 3
sig_figs_gc = 4
def round_to_n(x,n):
    """Round x to n significant figures."""
    return round(x, -int(floor(log10(x))) + (n - 1))
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
data= []
red_list = []
blue_list = []
clusters = ['all','NPac','SPac','Cont','GBPS','LRT']
GC_row_no = 2
for cluster in clusters:
cursor.execute(('SELECT 10th_percentile_mass_conc, 50th_percentile_mass_conc, 90th_percentile_mass_conc, mean_mass_conc, rel_err, data_source from whi_gc_and_sp2_stats_on_6h_clustered_ft_data where RH_threshold = %s and cluster = %s and test_scenario = %s'),(RH_of_interest,cluster,case))
data_raw = cursor.fetchall()
if case != 'default':
cursor.execute(('SELECT 10th_percentile_mass_conc, 50th_percentile_mass_conc, 90th_percentile_mass_conc, mean_mass_conc, rel_err, data_source from whi_gc_and_sp2_stats_on_6h_clustered_ft_data where RH_threshold = %s and cluster = %s and test_scenario = %s and data_source = %s'),(RH_of_interest,cluster,'default','SP2'))
sp2_data = cursor.fetchall()
data_raw.append(sp2_data[0])
if cluster == 'all':
cluster = 'all clusters\ncombined'
if cluster == 'NPac':
cluster = 'N. Pacific'
if cluster == 'SPac':
cluster = 'S. Pacific'
if cluster == 'Cont':
cluster = 'N. Canada'
if cluster == 'LRT':
cluster = 'W. Pacific/Asia'
if cluster == 'GBPS':
cluster = 'Georgia Basin\n/Puget Sound'
for row in data_raw:
data_source = row[5]
if data_source == 'SP2':
p10_sp2 = row[0]
p50_sp2 = row[1]
p90_sp2 = row[2]
mean_sp2 = row[3]
rel_err_sp2 = row[4]
if data_source == 'GEOS-Chem':
p10_gc = row[0]
p50_gc = row[1]
p90_gc = row[2]
mean_gc = row[3]
rel_err_gc = row[4]
SP2_10 = str(round_to_n(p10_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p10_sp2*rel_err_sp2,sig_figs_SP2))
SP2_50 = str(round_to_n(p50_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p50_sp2*rel_err_sp2,sig_figs_SP2))
SP2_90 = str(round_to_n(p90_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p90_sp2*rel_err_sp2,sig_figs_SP2))
SP2_mean = str(round_to_n(mean_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(mean_sp2*rel_err_sp2,sig_figs_SP2))
GC_10 = str(round_to_n(p10_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p10_gc*rel_err_gc,sig_figs_gc))
GC_50 = str(round_to_n(p50_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p50_gc*rel_err_gc,sig_figs_gc))
GC_90 = str(round_to_n(p90_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(p90_gc*rel_err_gc,sig_figs_gc))
GC_mean = str(round_to_n(mean_gc,sig_figs_gc)) + u'\u00B1' + str(round_to_n(mean_gc*rel_err_gc,sig_figs_gc))
GC_list = [p10_gc, p50_gc, p90_gc, mean_gc]
SP2_list = [p10_sp2, p50_sp2, p90_sp2, mean_sp2]
i = 0
for value in GC_list:
if (value - value*rel_err_gc) > (SP2_list[i]+ SP2_list[i]*rel_err_sp2):
red_list.append((GC_row_no,i+2))
if (value + value*rel_err_gc) < (SP2_list[i]- SP2_list[i]*rel_err_sp2):
blue_list.append((GC_row_no,i+2))
i+=1
table_row_SP2 = [cluster,'Measurement',SP2_10,SP2_50,SP2_90,SP2_mean]
table_row_GC = [cluster,'GEOS-Chem', GC_10,GC_50,GC_90,GC_mean]
data.append(table_row_SP2)
data.append(table_row_GC)
GC_row_no +=2
colLabels=('cluster','data dource','10th ptile', '50th ptile', '90th ptile', 'mean')
fig=plt.figure(figsize=(8,10))
ax = fig.add_subplot(111)
ax.axis('off')
#do the table
the_table = ax.table(cellText=data,
colLabels=colLabels,
loc='center')
table_props=the_table.properties()
table_cells=table_props['child_artists']
i=0
for cell in table_cells:
ht = cell.get_height()
#cell.set_width(0.1)
cell.set_height(ht*2)
cell.set_fontsize(14)
#if i in [1,3,5,7]:
# cell.set_linewidth(4)
i+=1
cellDict = the_table.get_celld()
for cell in red_list:
cellDict[cell]._text.set_color('r')
for cell in blue_list:
cellDict[cell]._text.set_color('b')
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/')
plt.savefig('GC default v10 vs SP2 by cluster for WHI - ' + case + ' - ' + str(RH_of_interest) + '% RH threshold.png',bbox_inches='tight')
plt.show()
cnx.close()
#######################
cases = ['default', 'Van', 'wet_scav', 'no_bb', 'all_together']
def round_to_n(x,n):
return round(x, -int(floor(log10(x))) + (n - 1))
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
data= []
red_list = []
blue_list = []
clusters = ['all','NPac','SPac','Cont','GBPS','LRT']
GC_row_offset = 0
for cluster in clusters:
col = 2
if cluster == 'all':
cluster_name = 'all clusters\ncombined'
if cluster == 'NPac':
cluster_name = 'N. Pacific'
if cluster == 'SPac':
cluster_name = 'S. Pacific'
if cluster == 'Cont':
cluster_name = 'N. Canada'
if cluster == 'LRT':
cluster_name = 'W. Pacific/Asia'
if cluster == 'GBPS':
cluster_name = 'Georgia Basin\n/Puget Sound'
p10_row = [cluster_name,'10th',]
p50_row = ['','50th',]
p90_row = ['','90th',]
mean_row = ['','mean',]
for case in cases:
cursor.execute(('SELECT 10th_percentile_mass_conc, 50th_percentile_mass_conc, 90th_percentile_mass_conc, mean_mass_conc, rel_err, data_source from whi_gc_and_sp2_stats_on_6h_clustered_ft_data where RH_threshold = %s and cluster = %s and test_scenario = %s'),(RH_of_interest,cluster,case))
data_raw = cursor.fetchall()
if case != 'default':
cursor.execute(('SELECT 10th_percentile_mass_conc, 50th_percentile_mass_conc, 90th_percentile_mass_conc, mean_mass_conc, rel_err, data_source from whi_gc_and_sp2_stats_on_6h_clustered_ft_data where RH_threshold = %s and cluster = %s and test_scenario = %s and data_source = %s'),(RH_of_interest,cluster,'default','SP2'))
sp2_data = cursor.fetchall()
data_raw.append(sp2_data[0])
for row in data_raw:
data_source = row[5]
if data_source == 'SP2':
p10_sp2 = row[0]
p50_sp2 = row[1]
p90_sp2 = row[2]
mean_sp2 = row[3]
rel_err_sp2 = row[4]
if data_source == 'GEOS-Chem':
p10_gc = row[0]
p50_gc = row[1]
p90_gc = row[2]
mean_gc = row[3]
rel_err_gc = row[4]
SP2_10 = str(round_to_n(p10_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p10_sp2*rel_err_sp2,sig_figs_SP2))
SP2_50 = str(round_to_n(p50_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p50_sp2*rel_err_sp2,sig_figs_SP2))
SP2_90 = str(round_to_n(p90_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(p90_sp2*rel_err_sp2,sig_figs_SP2))
SP2_mean = str(round_to_n(mean_sp2,sig_figs_SP2)) + u'\u00B1' + str(round_to_n(mean_sp2*rel_err_sp2,sig_figs_SP2))
GC_10 = str(round_to_n(p10_gc,sig_figs_gc))
GC_50 = str(round_to_n(p50_gc,sig_figs_gc))
GC_90 = str(round_to_n(p90_gc,sig_figs_gc))
GC_mean = str(round_to_n(mean_gc,sig_figs_gc))
GC_list = [p10_gc, p50_gc, p90_gc, mean_gc]
SP2_list = [p10_sp2, p50_sp2, p90_sp2, mean_sp2]
i = 0
GC_row_no = 1
for value in GC_list:
if (value - value*rel_err_gc) > (SP2_list[i]+ SP2_list[i]*rel_err_sp2):
red_list.append((GC_row_no + GC_row_offset,col))
print case, cluster, value,(GC_row_no + GC_row_offset,col)
if (value + value*rel_err_gc) < (SP2_list[i]- SP2_list[i]*rel_err_sp2):
blue_list.append((GC_row_no + GC_row_offset,col))
GC_row_no +=1
i+=1
p10_row.append(round_to_n(p10_gc/p10_sp2,3))
p50_row.append(round_to_n(p50_gc/p50_sp2,3))
p90_row.append(round_to_n(p90_gc/p90_sp2,3))
mean_row.append(round_to_n(mean_gc/mean_sp2,3))
col += 1
GC_row_offset += 4
data.append(p10_row)
data.append(p50_row)
data.append(p90_row)
data.append(mean_row)
colLabels=('cluster','percentile','default\nscenario', 'no Vancouver\nemissions', 'improved wet\nscavenging', 'no biomass\nburning', 'all changes\ntogether')
fig=plt.figure(figsize=(12,14))
ax = fig.add_subplot(111)
ax.axis('off')
#do the table
the_table = ax.table(cellText=data,
colLabels=colLabels,
loc='center')
cellDict = the_table.get_celld()
table_props=the_table.properties()
table_cells=table_props['child_artists']
for cell in table_cells:
ht = cell.get_height()
#cell.set_width(0.1)
cell.set_height(ht*2.2)
cell.set_fontsize(14)
for cell in red_list:
cellDict[cell]._text.set_color('r')
for cell in blue_list:
cellDict[cell]._text.set_color('b')
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/GOES-Chem/')
plt.savefig('GC all tests v10 vs SP2 by cluster for WHI - ' + str(RH_of_interest) + '% RH threshold.png',bbox_inches='tight')
plt.show()
cnx.close() | mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/compat/numpy/function.py | 7 | 12722 | """
For compatibility with numpy libraries, pandas functions or
methods have to accept '*args' and '**kwargs' parameters to
accommodate numpy arguments that are not actually used or
respected in the pandas implementation.
To ensure that users do not abuse these parameters, validation
is performed in 'validators.py' to make sure that any extra
parameters passed correspond ONLY to those in the numpy signature.
Part of that validation includes whether or not the user attempted
to pass in non-default values for these extraneous parameters. As we
want to discourage users from relying on these parameters when calling
the pandas implementation, we want them only to pass in the default values
for these parameters.
This module provides a set of commonly used default arguments for functions
and methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
from numpy import ndarray
from pandas.util._validators import (validate_args, validate_kwargs,
validate_args_and_kwargs)
from pandas.errors import UnsupportedFunctionCall
from pandas.core.dtypes.common import is_integer, is_bool
from pandas.compat import OrderedDict
class CompatValidator(object):
def __init__(self, defaults, fname=None, method=None,
max_fname_arg_count=None):
self.fname = fname
self.method = method
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
def __call__(self, args, kwargs, fname=None,
max_fname_arg_count=None, method=None):
if args or kwargs:
fname = self.fname if fname is None else fname
max_fname_arg_count = (self.max_fname_arg_count if
max_fname_arg_count is None
else max_fname_arg_count)
method = self.method if method is None else method
if method == 'args':
validate_args(fname, args, max_fname_arg_count, self.defaults)
elif method == 'kwargs':
validate_kwargs(fname, kwargs, self.defaults)
elif method == 'both':
validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
self.defaults)
else:
raise ValueError("invalid validation method "
"'{method}'".format(method=method))
ARGMINMAX_DEFAULTS = dict(out=None)
validate_argmin = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmin',
method='both', max_fname_arg_count=1)
validate_argmax = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmax',
method='both', max_fname_arg_count=1)
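# Usage sketch (illustrative only, signature simplified): a pandas method that
# mirrors a numpy signature forwards its extra args/kwargs to one of these
# validators, roughly like
#
#     def argmin(self, axis=None, skipna=True, *args, **kwargs):
#         validate_argmin_with_skipna(skipna, args, kwargs)
#         ...
#
# The validator only checks that the numpy-compat arguments hold their default
# values; the pandas implementation itself ignores them.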
def process_skipna(skipna, args):
if isinstance(skipna, ndarray) or skipna is None:
args = (skipna,) + args
skipna = True
return skipna, args
def validate_argmin_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmin' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmin(args, kwargs)
return skipna
def validate_argmax_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmax' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmax(args, kwargs)
return skipna
ARGSORT_DEFAULTS = OrderedDict()
ARGSORT_DEFAULTS['axis'] = -1
ARGSORT_DEFAULTS['kind'] = 'quicksort'
ARGSORT_DEFAULTS['order'] = None
validate_argsort = CompatValidator(ARGSORT_DEFAULTS, fname='argsort',
max_fname_arg_count=0, method='both')
# two different signatures of argsort, this second validation
# for when the `kind` param is supported
ARGSORT_DEFAULTS_KIND = OrderedDict()
ARGSORT_DEFAULTS_KIND['axis'] = -1
ARGSORT_DEFAULTS_KIND['order'] = None
validate_argsort_kind = CompatValidator(ARGSORT_DEFAULTS_KIND, fname='argsort',
max_fname_arg_count=0, method='both')
def validate_argsort_with_ascending(ascending, args, kwargs):
"""
If 'Categorical.argsort' is called via the 'numpy' library, the
first parameter in its signature is 'axis', which takes either
an integer or 'None', so check if the 'ascending' parameter has
either integer type or is None, since 'ascending' itself should
be a boolean
"""
if is_integer(ascending) or ascending is None:
args = (ascending,) + args
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
return ascending
CLIP_DEFAULTS = dict(out=None)
validate_clip = CompatValidator(CLIP_DEFAULTS, fname='clip',
method='both', max_fname_arg_count=3)
def validate_clip_with_axis(axis, args, kwargs):
"""
If 'NDFrame.clip' is called via the numpy library, the third
parameter in its signature is 'out', which can takes an ndarray,
so check if the 'axis' parameter is an instance of ndarray, since
'axis' itself should either be an integer or None
"""
if isinstance(axis, ndarray):
args = (axis,) + args
axis = None
validate_clip(args, kwargs)
return axis
COMPRESS_DEFAULTS = OrderedDict()
COMPRESS_DEFAULTS['axis'] = None
COMPRESS_DEFAULTS['out'] = None
validate_compress = CompatValidator(COMPRESS_DEFAULTS, fname='compress',
method='both', max_fname_arg_count=1)
CUM_FUNC_DEFAULTS = OrderedDict()
CUM_FUNC_DEFAULTS['dtype'] = None
CUM_FUNC_DEFAULTS['out'] = None
validate_cum_func = CompatValidator(CUM_FUNC_DEFAULTS, method='both',
max_fname_arg_count=1)
validate_cumsum = CompatValidator(CUM_FUNC_DEFAULTS, fname='cumsum',
method='both', max_fname_arg_count=1)
def validate_cum_func_with_skipna(skipna, args, kwargs, name):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'dtype', which takes either a
'numpy' dtype or 'None', so check if the 'skipna' parameter is
a boolean or not
"""
if not is_bool(skipna):
args = (skipna,) + args
skipna = True
validate_cum_func(args, kwargs, fname=name)
return skipna
LOGICAL_FUNC_DEFAULTS = dict(out=None)
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method='kwargs')
MINMAX_DEFAULTS = dict(out=None)
validate_min = CompatValidator(MINMAX_DEFAULTS, fname='min',
method='both', max_fname_arg_count=1)
validate_max = CompatValidator(MINMAX_DEFAULTS, fname='max',
method='both', max_fname_arg_count=1)
RESHAPE_DEFAULTS = dict(order='C')
validate_reshape = CompatValidator(RESHAPE_DEFAULTS, fname='reshape',
method='both', max_fname_arg_count=1)
REPEAT_DEFAULTS = dict(axis=None)
validate_repeat = CompatValidator(REPEAT_DEFAULTS, fname='repeat',
method='both', max_fname_arg_count=1)
ROUND_DEFAULTS = dict(out=None)
validate_round = CompatValidator(ROUND_DEFAULTS, fname='round',
method='both', max_fname_arg_count=1)
SORT_DEFAULTS = OrderedDict()
SORT_DEFAULTS['axis'] = -1
SORT_DEFAULTS['kind'] = 'quicksort'
SORT_DEFAULTS['order'] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname='sort',
method='kwargs')
STAT_FUNC_DEFAULTS = OrderedDict()
STAT_FUNC_DEFAULTS['dtype'] = None
STAT_FUNC_DEFAULTS['out'] = None
validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS,
method='kwargs')
validate_sum = CompatValidator(STAT_FUNC_DEFAULTS, fname='sum',
                               method='both', max_fname_arg_count=1)
validate_mean = CompatValidator(STAT_FUNC_DEFAULTS, fname='mean',
method='both', max_fname_arg_count=1)
STAT_DDOF_FUNC_DEFAULTS = OrderedDict()
STAT_DDOF_FUNC_DEFAULTS['dtype'] = None
STAT_DDOF_FUNC_DEFAULTS['out'] = None
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS,
method='kwargs')
TAKE_DEFAULTS = OrderedDict()
TAKE_DEFAULTS['out'] = None
TAKE_DEFAULTS['mode'] = 'raise'
validate_take = CompatValidator(TAKE_DEFAULTS, fname='take',
method='kwargs')
def validate_take_with_convert(convert, args, kwargs):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'axis', which takes either an
ndarray or 'None', so check if the 'convert' parameter is either
an instance of ndarray or is None
"""
if isinstance(convert, ndarray) or convert is None:
args = (convert,) + args
convert = True
validate_take(args, kwargs, max_fname_arg_count=3, method='both')
return convert
TRANSPOSE_DEFAULTS = dict(axes=None)
validate_transpose = CompatValidator(TRANSPOSE_DEFAULTS, fname='transpose',
method='both', max_fname_arg_count=0)
def validate_transpose_for_generic(inst, kwargs):
try:
validate_transpose(tuple(), kwargs)
except ValueError as e:
klass = type(inst).__name__
msg = str(e)
        # the Panel class actually relies on the 'axes' parameter if called
# via the 'numpy' library, so let's make sure the error is specific
# about saying that the parameter is not supported for particular
# implementations of 'transpose'
if "the 'axes' parameter is not supported" in msg:
msg += " for {klass} instances".format(klass=klass)
raise ValueError(msg)
def validate_window_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .{func}() directly instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_rolling_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .rolling(...).{func}() instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_expanding_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .expanding(...).{func}() instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_groupby_func(name, args, kwargs, allowed=None):
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with groupby. Use .groupby(...)."
"{func}() instead".format(func=name)))
RESAMPLER_NUMPY_OPS = ('min', 'max', 'sum', 'prod',
'mean', 'std', 'var')
def validate_resampler_func(method, args, kwargs):
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
the function signature
"""
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with resample. Use .resample(...)."
"{func}() instead".format(func=method)))
else:
raise TypeError("too many arguments passed in")
| mit |
foxtrotmike/pairpred | analyzeZDOCK.py | 1 | 1399 | # -*- coding: utf-8 -*-
"""
Created on Tue May 28 13:51:06 2013
Plot the comparison between PAIRpred and ZDOCK
@author: root
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import myPickle as mPickle
from DBD4 import parseCSVData
def getSortedR(cid,bdir,A,N=2000,M=2000):
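    # Read the RMSDs of the top N ZDOCK decoys for complex `cid` from the results
    # file; return both the original ZDOCK ordering (R) and a copy whose first M
    # entries have been re-sorted using the PAIRpred scores stored in A[cid][0] (RS).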
ifname=os.path.join(bdir,cid+'.zd3.0.2.cg.out.rmsds')
R=np.zeros(N)
with open(ifname, 'r') as f:
for n in range(N):
R[n]=f.readline().split()[1]
sidx=np.argsort(A[cid][0][:M])
RS=R+0.0
RS[:M]=R[:M][sidx]
return RS,R
bdir='../zdock_data/decoys_bm4_zd3.0.2_15deg/results/'
dfname='../ascores_2K2.scr.pkl'
d4=parseCSVData('..\Complete Data\DBD4_data.csv')
A=mPickle.load(dfname)
RS=[]
R=[]
dthr=2.5
N=2000
ncids=0
for i,cid in enumerate(A.keys()):
# if cid not in d4 or d4[cid][1]=='RB':
# continue
rs,r=getSortedR(cid,bdir,A,N=N)
# if np.any(r[:10]<dthr):
# # import pdb
# print cid, d4[cid][1]
#
# pdb.set_trace()
R.append(np.cumsum(r<dthr)>0)
RS.append(np.cumsum(rs<dthr)>0)
ncids=ncids+1.0
plt.plot(range(1,N+1),np.sum(np.array(R),axis=0)/ncids,'k.-',label='ZDOCK')
plt.plot(range(1,N+1),np.sum(np.array(RS),axis=0)/ncids,'ro-',label='ZDOCK with Resorting')
plt.xlabel('Number of Predictions')
plt.ylabel('Success Rate')
plt.axis([0,10,0,1])
plt.legend(loc=0)
plt.grid()
plt.show() | gpl-3.0 |
glemaitre/fdasrsf | fdasrsf/time_warping.py | 1 | 34102 | """
Group-wise function alignment using SRSF framework and Dynamic Programming
moduleauthor:: Derek Tucker <[email protected]>
"""
import numpy as np
import matplotlib.pyplot as plt
import fdasrsf.utility_functions as uf
from scipy.integrate import trapz, cumtrapz
from scipy.linalg import svd
from numpy.linalg import norm
from joblib import Parallel, delayed
from fdasrsf.fPLS import pls_svd
import fdasrsf.plot_style as plot
import fpls_warp as fpls
import collections
def srsf_align(f, time, method="mean", showplot=True, smoothdata=False,
lam=0.0):
"""
This function aligns a collection of functions using the elastic
square-root slope (srsf) framework.
:param f: numpy ndarray of shape (M,N) of N functions with M samples
:param time: vector of size M describing the sample points
    :param method: (string) calculate Karcher Mean or Median
(options = "mean" or "median") (default="mean")
:param showplot: Shows plots of results using matplotlib (default = T)
:param smoothdata: Smooth the data using a box filter (default = F)
:param lam: controls the elasticity (default = 0)
:type lam: double
:type smoothdata: bool
:type f: np.ndarray
:type time: np.ndarray
:rtype: tuple of numpy array
:return fn: aligned functions - numpy ndarray of shape (M,N) of N
functions with M samples
:return qn: aligned srvfs - similar structure to fn
:return q0: original srvf - similar structure to fn
:return fmean: function mean or median - vector of length M
:return mqn: srvf mean or median - vector of length M
:return gam: warping functions - similar structure to fn
:return orig_var: Original Variance of Functions
:return amp_var: Amplitude Variance
:return phase_var: Phase Variance
Examples
>>> import tables
>>> fun=tables.open_file("../Data/simu_data.h5")
>>> f = fun.root.f[:]
>>> f = f.transpose()
>>> time = fun.root.time[:]
>>> out = srsf_align(f,time)
"""
M = f.shape[0]
N = f.shape[1]
if M > 500:
parallel = True
elif N > 100:
parallel = True
else:
parallel = False
eps = np.finfo(np.double).eps
f0 = f
methods = ["mean", "median"]
# 0 mean, 1-median
method = [i for i, x in enumerate(methods) if x == method]
if len(method) == 0:
method = 0
else:
method = method[0]
if showplot:
plot.f_plot(time, f, title="f Original Data")
# Compute SRSF function from data
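    # SRSF: q(t) = f'(t) / sqrt(|f'(t)|); the small eps guards against
    # division by zero on flat segments of f.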
f, g, g2 = uf.gradient_spline(time, f, smoothdata)
q = g / np.sqrt(abs(g) + eps)
print("Initializing...")
mnq = q.mean(axis=1)
a = mnq.repeat(N)
d1 = a.reshape(M, N)
d = (q - d1) ** 2
dqq = np.sqrt(d.sum(axis=0))
min_ind = dqq.argmin()
mq = q[:, min_ind]
mf = f[:, min_ind]
if parallel:
out = Parallel(n_jobs=-1)(delayed(uf.optimum_reparam)(mq, time,
q[:, n], lam) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = uf.optimum_reparam(mq, time, q, lam)
gamI = uf.SqrtMeanInverse(gam)
mf = np.interp((time[-1] - time[0]) * gamI + time[0], time, mf)
mq = uf.f_to_srsf(mf, time)
# Compute Karcher Mean
    if method == 0:
        print("Compute Karcher Mean of %d functions in SRSF space..." % N)
    if method == 1:
        print("Compute Karcher Median of %d functions in SRSF space..." % N)
MaxItr = 20
ds = np.repeat(0.0, MaxItr + 2)
ds[0] = np.inf
qun = np.repeat(0.0, MaxItr + 1)
tmp = np.zeros((M, MaxItr + 2))
tmp[:, 0] = mq
mq = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = f
f = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = q
q = tmp
for r in range(0, MaxItr):
print("updating step: r=%d" % (r + 1))
if r == (MaxItr - 1):
print("maximal number of iterations is reached")
# Matching Step
if parallel:
out = Parallel(n_jobs=-1)(delayed(uf.optimum_reparam)(mq[:, r],
time, q[:, n, 0], lam) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = uf.optimum_reparam(mq[:, r], time, q[:, :, 0], lam)
gam_dev = np.zeros((M, N))
for k in range(0, N):
f[:, k, r + 1] = np.interp((time[-1] - time[0]) * gam[:, k]
+ time[0], time, f[:, k, 0])
q[:, k, r + 1] = uf.f_to_srsf(f[:, k, r + 1], time)
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
mqt = mq[:, r]
a = mqt.repeat(N)
d1 = a.reshape(M, N)
d = (q[:, :, r + 1] - d1) ** 2
if method == 0:
d1 = sum(trapz(d, time, axis=0))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
qtemp = q[:, :, r + 1]
mq[:, r + 1] = qtemp.mean(axis=1)
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if method == 1:
d1 = np.sqrt(sum(trapz(d, time, axis=0)))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
dist_iinv = ds[r + 1] ** (-1)
qtemp = q[:, :, r + 1] / ds[r + 1]
mq[:, r + 1] = qtemp.sum(axis=1) * dist_iinv
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if qun[r] < 1e-2 or r >= MaxItr:
break
# Last Step with centering of gam
r += 1
if parallel:
out = Parallel(n_jobs=-1)(delayed(uf.optimum_reparam)(mq[:, r], time, q[:, n, 0], lam) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = uf.optimum_reparam(mq[:, r], time, q[:, :, 0], lam)
gam_dev = np.zeros((M, N))
for k in range(0, N):
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
time0 = (time[-1] - time[0]) * gamI + time[0]
mq[:, r + 1] = np.interp(time0, time, mq[:, r]) * np.sqrt(gamI_dev)
for k in range(0, N):
q[:, k, r + 1] = np.interp(time0, time, q[:, k, r]) * np.sqrt(gamI_dev)
f[:, k, r + 1] = np.interp(time0, time, f[:, k, r])
gam[:, k] = np.interp(time0, time, gam[:, k])
# Aligned data & stats
fn = f[:, :, r + 1]
qn = q[:, :, r + 1]
q0 = q[:, :, 0]
mean_f0 = f0.mean(axis=1)
std_f0 = f0.std(axis=1)
mean_fn = fn.mean(axis=1)
std_fn = fn.std(axis=1)
mqn = mq[:, r + 1]
tmp = np.zeros((1, M))
tmp = tmp.flatten()
tmp[1:] = cumtrapz(mqn * np.abs(mqn), time)
fmean = np.mean(f0[1, :]) + tmp
fgam = np.zeros((M, N))
for k in range(0, N):
time0 = (time[-1] - time[0]) * gam[:, k] + time[0]
fgam[:, k] = np.interp(time0, time, fmean)
var_fgam = fgam.var(axis=1)
orig_var = trapz(std_f0 ** 2, time)
amp_var = trapz(std_fn ** 2, time)
phase_var = trapz(var_fgam, time)
if showplot:
fig, ax = plot.f_plot(np.arange(0, M) / float(M - 1), gam,
title="Warping Functions")
ax.set_aspect('equal')
plot.f_plot(time, fn, title="Warped Data")
tmp = np.array([mean_f0, mean_f0 + std_f0, mean_f0 - std_f0])
tmp = tmp.transpose()
plot.f_plot(time, tmp, title="Original Data: Mean $\pm$ STD")
tmp = np.array([mean_fn, mean_fn + std_fn, mean_fn - std_fn])
tmp = tmp.transpose()
plot.f_plot(time, tmp, title="Warped Data: Mean $\pm$ STD")
plot.f_plot(time, fmean, title="$f_{mean}$")
plt.show()
align_results = collections.namedtuple('align', ['fn', 'qn', 'q0', 'fmean',
'mqn', 'gam', 'orig_var',
'amp_var', 'phase_var'])
out = align_results(fn, qn, q0, fmean, mqn, gam, orig_var, amp_var,
phase_var)
return out
def srsf_align_pair(f, g, time, method="mean", showplot=True,
smoothdata=False, lam=0.0):
"""
This function aligns a collection of functions using the elastic square-
root slope (srsf) framework.
:param f: numpy ndarray of shape (M,N) of N functions with M samples
:param g: numpy ndarray of shape (M,N) of N functions with M samples
:param time: vector of size M describing the sample points
    :param method: (string) calculate Karcher Mean or Median (options =
"mean" or "median") (default="mean")
:param showplot: Shows plots of results using matplotlib (default = T)
:param smoothdata: Smooth the data using a box filter (default = F)
:param lam: controls the elasticity (default = 0)
:type lam: double
:type smoothdata: bool
:type f: np.ndarray
:type time: np.ndarray
:rtype: tuple of numpy array
:return fn: aligned functions - numpy ndarray of shape (M,N) of N
functions with M samples
:return gn: aligned functions - numpy ndarray of shape (M,N) of N
functions with M samples
:return qfn: aligned srvfs - similar structure to fn
:return qgn: aligned srvfs - similar structure to fn
:return qf0: original srvf - similar structure to fn
:return qg0: original srvf - similar structure to fn
    :return fmean: f function mean or median - vector of length M
    :return gmean: g function mean or median - vector of length M
    :return mqfn: srvf mean or median - vector of length M
    :return mqgn: srvf mean or median - vector of length M
:return gam: warping functions - similar structure to fn
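    Example (illustrative sketch; f, g and time are user-supplied arrays as
    described above):
    >>> out = srsf_align_pair(f, g, time, showplot=False)
    >>> fn, gn = out.fn, out.gn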
"""
M = f.shape[0]
N = f.shape[1]
if M > 500:
parallel = True
elif N > 100:
parallel = True
else:
parallel = False
eps = np.finfo(np.double).eps
f0 = f
g0 = g
methods = ["mean", "median"]
# 0 mean, 1-median
method = [i for i, x in enumerate(methods) if x == method]
    if len(method) == 0:
        method = 0
    else:
        method = method[0]
if showplot:
plot.f_plot(time, f, title="Original Data")
plot.f_plot(time, g, title="g Original Data")
# Compute SRSF function from data
f, g1, g2 = uf.gradient_spline(time, f, smoothdata)
qf = g1 / np.sqrt(abs(g1) + eps)
g, g1, g2 = uf.gradient_spline(time, g, smoothdata)
qg = g1 / np.sqrt(abs(g1) + eps)
print ("Initializing...")
mnq = qf.mean(axis=1)
a = mnq.repeat(N)
d1 = a.reshape(M, N)
d = (qf - d1) ** 2
dqq = np.sqrt(d.sum(axis=0))
min_ind = dqq.argmin()
mq = np.column_stack((qf[:, min_ind], qg[:, min_ind]))
mf = np.column_stack((f[:, min_ind], g[:, min_ind]))
if parallel:
out = Parallel(n_jobs=-1)(delayed(uf.optimum_reparam_pair)(mq, time, qf[:, n], qg[:, n], lam) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = uf.optimum_reparam_pair(mq, time, qf, qg, lam)
gamI = uf.SqrtMeanInverse(gam)
time0 = (time[-1] - time[0]) * gamI + time[0]
for k in range(0, 2):
mf[:, k] = np.interp(time0, time, mf[:, k])
mq[:, k] = uf.f_to_srsf(mf[:, k], time)
# Compute Karcher Mean
    if method == 0:
        print("Compute Karcher Mean of %d functions in SRSF space..." % N)
    if method == 1:
        print("Compute Karcher Median of %d functions in SRSF space..." % N)
MaxItr = 20
ds = np.repeat(0.0, MaxItr + 2)
ds[0] = np.inf
qfun = np.repeat(0.0, MaxItr + 1)
qgun = np.repeat(0.0, MaxItr + 1)
tmp = np.zeros((M, 2, MaxItr + 2))
tmp[:, :, 0] = mq
mq = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = f
f = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = g
g = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = qf
qf = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = qg
qg = tmp
for r in range(0, MaxItr):
print("updating step: r=%d" % (r + 1))
if r == (MaxItr - 1):
print("maximal number of iterations is reached")
# Matching Step
if parallel:
out = Parallel(n_jobs=-1)(
delayed(uf.optimum_reparam_pair)(mq[:, :, r], time, qf[:, n, 0], qg[:, n, 0], lam) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = uf.optimum_reparam_pair(mq[:, :, r], time, qf[:, :, 0],
qg[:, :, 0], lam)
gam_dev = np.zeros((M, N))
for k in range(0, N):
time0 = (time[-1] - time[0]) * gam[:, k] + time[0]
f[:, k, r + 1] = np.interp(time0, time, f[:, k, 0])
g[:, k, r + 1] = np.interp(time0, time, g[:, k, 0])
qf[:, k, r + 1] = uf.f_to_srsf(f[:, k, r + 1], time)
qg[:, k, r + 1] = uf.f_to_srsf(g[:, k, r + 1], time)
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
mqt = mq[:, 0, r]
a = mqt.repeat(N)
d1 = a.reshape(M, N)
df = (qf[:, :, r + 1] - d1) ** 2
mqt = mq[:, 1, r]
a = mqt.repeat(N)
d1 = a.reshape(M, N)
dg = (qg[:, :, r + 1] - d1) ** 2
if method == 0:
d1 = sum(trapz(df, time, axis=0))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, time, axis=0))
ds_tmp = d1 + lam * d2
d1 = sum(trapz(dg, time, axis=0))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, time, axis=0))
ds_tmp1 = d1 + lam * d2
ds[r + 1] = (ds_tmp + ds_tmp1) / 2
# Minimization Step
# compute the mean of the matched function
qtemp = qf[:, :, r + 1]
mq[:, 0, r + 1] = qtemp.mean(axis=1)
qtemp = qg[:, :, r + 1]
mq[:, 1, r + 1] = qtemp.mean(axis=1)
qfun[r] = norm(mq[:, 0, r + 1] - mq[:, 0, r]) / norm(mq[:, 0, r])
qgun[r] = norm(mq[:, 1, r + 1] - mq[:, 1, r]) / norm(mq[:, 1, r])
if method == 1:
d1 = sum(trapz(df, time, axis=0))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, time, axis=0))
ds_tmp = np.sqrt(d1) + lam * d2
ds_tmp1 = np.sqrt(sum(trapz(dg, time, axis=0))) + lam * sum(
trapz((1 - np.sqrt(gam_dev)) ** 2, time, axis=0))
ds[r + 1] = (ds_tmp + ds_tmp1) / 2
# Minimization Step
# compute the mean of the matched function
dist_iinv = ds[r + 1] ** (-1)
qtemp = qf[:, :, r + 1] / ds[r + 1]
mq[:, 0, r + 1] = qtemp.sum(axis=1) * dist_iinv
qtemp = qg[:, :, r + 1] / ds[r + 1]
mq[:, 1, r + 1] = qtemp.sum(axis=1) * dist_iinv
qfun[r] = norm(mq[:, 0, r + 1] - mq[:, 0, r]) / norm(mq[:, 0, r])
qgun[r] = norm(mq[:, 1, r + 1] - mq[:, 1, r]) / norm(mq[:, 1, r])
if (qfun[r] < 1e-2 and qgun[r] < 1e-2) or r >= MaxItr:
break
# Last Step with centering of gam
r += 1
if parallel:
out = Parallel(n_jobs=-1)(
delayed(uf.optimum_reparam_pair)(mq[:, :, r], time, qf[:, n, 0],
qg[:, n, 0], lam) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = uf.optimum_reparam_pair(mq[:, :, r], time, qf[:, :, 0],
qg[:, :, 0], lam)
gam_dev = np.zeros((M, N))
for k in range(0, N):
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
time0 = (time[-1] - time[0]) * gamI + time[0]
for k in range(0, 2):
mq[:, k, r + 1] = np.interp(time0, time,
mq[:, k, r]) * np.sqrt(gamI_dev)
for k in range(0, N):
qf[:, k, r + 1] = np.interp(time0, time,
qf[:, k, r]) * np.sqrt(gamI_dev)
f[:, k, r + 1] = np.interp(time0, time, f[:, k, r])
qg[:, k, r + 1] = np.interp(time0, time,
qg[:, k, r]) * np.sqrt(gamI_dev)
g[:, k, r + 1] = np.interp(time0, time, g[:, k, r])
gam[:, k] = np.interp(time0, time, gam[:, k])
# Aligned data & stats
fn = f[:, :, r + 1]
gn = g[:, :, r + 1]
qfn = qf[:, :, r + 1]
qf0 = qf[:, :, 0]
qgn = qg[:, :, r + 1]
qg0 = qg[:, :, 0]
mean_f0 = f0.mean(axis=1)
std_f0 = f0.std(axis=1)
mean_fn = fn.mean(axis=1)
std_fn = fn.std(axis=1)
mean_g0 = g0.mean(axis=1)
std_g0 = g0.std(axis=1)
mean_gn = gn.mean(axis=1)
std_gn = gn.std(axis=1)
mqfn = mq[:, 0, r + 1]
mqgn = mq[:, 1, r + 1]
tmp = np.zeros(M)
tmp[1:] = cumtrapz(mqfn * np.abs(mqfn), time)
fmean = np.mean(f0[1, :]) + tmp
tmp = np.zeros(M)
tmp[1:] = cumtrapz(mqgn * np.abs(mqgn), time)
gmean = np.mean(g0[1, :]) + tmp
if showplot:
fig, ax = plot.f_plot(np.arange(0, M) / float(M - 1), gam,
title="Warping Functions")
ax.set_aspect('equal')
plot.f_plot(time, fn, title="fn Warped Data")
plot.f_plot(time, gn, title="gn Warped Data")
tmp = np.array([mean_f0, mean_f0 + std_f0, mean_f0 - std_f0])
tmp = tmp.transpose()
plot.f_plot(time, tmp, title="f Original Data: Mean $\pm$ STD")
tmp = np.array([mean_fn, mean_fn + std_fn, mean_fn - std_fn])
tmp = tmp.transpose()
plot.f_plot(time, tmp, title="fn Warped Data: Mean $\pm$ STD")
tmp = np.array([mean_g0, mean_g0 + std_g0, mean_g0 - std_g0])
tmp = tmp.transpose()
plot.f_plot(time, tmp, title="g Original Data: Mean $\pm$ STD")
tmp = np.array([mean_gn, mean_gn + std_gn, mean_gn - std_gn])
tmp = tmp.transpose()
plot.f_plot(time, tmp, title="gn Warped Data: Mean $\pm$ STD")
plot.f_plot(time, fmean, title="$f_{mean}$")
plot.f_plot(time, gmean, title="$g_{mean}$")
plt.show()
align_results = collections.namedtuple('align', ['fn', 'gn', 'qfn', 'qf0',
'qgn', 'qg0', 'fmean',
'gmean', 'mqfn', 'mqgn',
'gam'])
out = align_results(fn, gn, qfn, qf0, qgn, qg0, fmean, gmean, mqfn,
mqgn, gam)
return out
def align_fPCA(f, time, num_comp=3, showplot=True, smoothdata=False):
"""
    Aligns a collection of functions while extracting principal components.
    The functions are aligned to the principal components.
:param f: numpy ndarray of shape (M,N) of N functions with M samples
:param time: vector of size M describing the sample points
:param num_comp: number of fPCA components
:param showplot: Shows plots of results using matplotlib (default = T)
    :param smoothdata: Smooth the data using a box filter (default = F)
    :type smoothdata: bool
:type f: np.ndarray
:type time: np.ndarray
:rtype: tuple of numpy array
:return fn: aligned functions - numpy ndarray of shape (M,N) of N
functions with M samples
:return qn: aligned srvfs - similar structure to fn
:return q0: original srvf - similar structure to fn
:return mqn: srvf mean or median - vector of length M
:return gam: warping functions - similar structure to fn
:return q_pca: srsf principal directions
:return f_pca: functional principal directions
:return latent: latent values
:return coef: coefficients
:return U: eigenvectors
:return orig_var: Original Variance of Functions
:return amp_var: Amplitude Variance
:return phase_var: Phase Variance
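    Example (illustrative sketch; f and time are user-supplied arrays as
    described above):
    >>> out = align_fPCA(f, time, num_comp=3, showplot=False)
    >>> fn, f_pca = out.fn, out.f_pca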
"""
lam = 0.0
MaxItr = 50
coef = np.arange(-2., 3.)
Nstd = coef.shape[0]
M = f.shape[0]
N = f.shape[1]
if M > 500:
parallel = True
elif N > 100:
parallel = True
else:
parallel = False
eps = np.finfo(np.double).eps
f0 = f
if showplot:
plot.f_plot(time, f, title="Original Data")
# Compute SRSF function from data
f, g, g2 = uf.gradient_spline(time, f, smoothdata)
q = g / np.sqrt(abs(g) + eps)
print ("Initializing...")
mnq = q.mean(axis=1)
a = mnq.repeat(N)
d1 = a.reshape(M, N)
d = (q - d1) ** 2
dqq = np.sqrt(d.sum(axis=0))
min_ind = dqq.argmin()
print("Aligning %d functions in SRVF space to %d fPCA components..."
% (N, num_comp))
itr = 0
mq = np.zeros((M, MaxItr + 1))
mq[:, itr] = q[:, min_ind]
fi = np.zeros((M, N, MaxItr + 1))
fi[:, :, 0] = f
qi = np.zeros((M, N, MaxItr + 1))
qi[:, :, 0] = q
gam = np.zeros((M, N, MaxItr + 1))
cost = np.zeros(MaxItr + 1)
while itr < MaxItr:
print("updating step: r=%d" % (itr + 1))
if itr == MaxItr:
print("maximal number of iterations is reached")
# PCA Step
a = mq[:, itr].repeat(N)
d1 = a.reshape(M, N)
qhat_cent = qi[:, :, itr] - d1
K = np.cov(qi[:, :, itr])
U, s, V = svd(K)
alpha_i = np.zeros((num_comp, N))
for ii in range(0, num_comp):
for jj in range(0, N):
alpha_i[ii, jj] = trapz(qhat_cent[:, jj] * U[:, ii], time)
U1 = U[:, 0:num_comp]
tmp = U1.dot(alpha_i)
qhat = d1 + tmp
# Matching Step
if parallel:
out = Parallel(n_jobs=-1)(
delayed(uf.optimum_reparam)(qhat[:, n], time, qi[:, n, itr],
lam) for n in range(N))
gam_t = np.array(out)
gam[:, :, itr] = gam_t.transpose()
else:
gam[:, :, itr] = uf.optimum_reparam(qhat, time, qi[:, :, itr], lam)
for k in range(0, N):
time0 = (time[-1] - time[0]) * gam[:, k, itr] + time[0]
fi[:, k, itr + 1] = np.interp(time0, time, fi[:, k, itr])
qi[:, k, itr + 1] = uf.f_to_srsf(fi[:, k, itr + 1], time)
qtemp = qi[:, :, itr + 1]
mq[:, itr + 1] = qtemp.mean(axis=1)
cost_temp = np.zeros(N)
for ii in range(0, N):
cost_temp[ii] = norm(qtemp[:, ii] - qhat[:, ii]) ** 2
cost[itr + 1] = cost_temp.mean()
if abs(cost[itr + 1] - cost[itr]) < 1e-06:
break
itr += 1
if itr >= MaxItr:
itrf = MaxItr
else:
itrf = itr+1
cost = cost[1:(itrf+1)]
# Aligned data & stats
fn = fi[:, :, itrf]
qn = qi[:, :, itrf]
q0 = qi[:, :, 0]
mean_f0 = f0.mean(axis=1)
std_f0 = f0.std(axis=1)
mqn = mq[:, itrf]
gamf = gam[:, :, 0]
for k in range(1, itr):
gam_k = gam[:, :, k]
for l in range(0, N):
time0 = (time[-1] - time[0]) * gam_k[:, l] + time[0]
gamf[:, l] = np.interp(time0, time, gamf[:, l])
# Center Mean
gamI = uf.SqrtMeanInverse(gamf)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
time0 = (time[-1] - time[0]) * gamI + time[0]
mqn = np.interp(time0, time, mqn) * np.sqrt(gamI_dev)
for k in range(0, N):
qn[:, k] = np.interp(time0, time, qn[:, k]) * np.sqrt(gamI_dev)
fn[:, k] = np.interp(time0, time, fn[:, k])
gamf[:, k] = np.interp(time0, time, gamf[:, k])
mean_fn = fn.mean(axis=1)
std_fn = fn.std(axis=1)
# Get Final PCA
mididx = np.round(time.shape[0] / 2)
m_new = np.sign(fn[mididx, :]) * np.sqrt(np.abs(fn[mididx, :]))
mqn2 = np.append(mqn, m_new.mean())
qn2 = np.vstack((qn, m_new))
K = np.cov(qn2)
U, s, V = svd(K)
stdS = np.sqrt(s)
# compute the PCA in the q domain
q_pca = np.ndarray(shape=(M + 1, Nstd, num_comp), dtype=float)
for k in range(0, num_comp):
for l in range(0, Nstd):
q_pca[:, l, k] = mqn2 + coef[l] * stdS[k] * U[:, k]
# compute the correspondence in the f domain
f_pca = np.ndarray(shape=(M, Nstd, num_comp), dtype=float)
for k in range(0, num_comp):
for l in range(0, Nstd):
q_pca_tmp = q_pca[0:M, l, k] * np.abs(q_pca[0:M, l, k])
q_pca_tmp2 = np.sign(q_pca[M, l, k]) * (q_pca[M, l, k] ** 2)
f_pca[:, l, k] = uf.cumtrapzmid(time, q_pca_tmp, q_pca_tmp2)
N2 = qn.shape[1]
c = np.zeros((N2, num_comp))
for k in range(0, num_comp):
for l in range(0, N2):
c[l, k] = sum((np.append(qn[:, l], m_new[l]) - mqn2) * U[:, k])
if showplot:
CBcdict = {
'Bl': (0, 0, 0),
'Or': (.9, .6, 0),
'SB': (.35, .7, .9),
'bG': (0, .6, .5),
'Ye': (.95, .9, .25),
'Bu': (0, .45, .7),
'Ve': (.8, .4, 0),
'rP': (.8, .6, .7),
}
cl = sorted(CBcdict.keys())
# Align Plots
fig, ax = plot.f_plot(np.arange(0, M) / float(M - 1), gamf,
title="Warping Functions")
ax.set_aspect('equal')
plot.f_plot(time, fn, title="Warped Data")
tmp = np.array([mean_f0, mean_f0 + std_f0, mean_f0 - std_f0])
tmp = tmp.transpose()
plot.f_plot(time, tmp, title="Original Data: Mean $\pm$ STD")
tmp = np.array([mean_fn, mean_fn + std_fn, mean_fn - std_fn])
tmp = tmp.transpose()
plot.f_plot(time, tmp, title="Warped Data: Mean $\pm$ STD")
# PCA Plots
fig, ax = plt.subplots(2, num_comp)
for k in range(0, num_comp):
axt = ax[0, k]
for l in range(0, Nstd):
axt.plot(time, q_pca[0:M, l, k], color=CBcdict[cl[l]])
axt.hold(True)
axt.set_title('q domain: PD %d' % (k + 1))
plot.rstyle(axt)
axt = ax[1, k]
for l in range(0, Nstd):
axt.plot(time, f_pca[:, l, k], color=CBcdict[cl[l]])
axt.hold(True)
axt.set_title('f domain: PD %d' % (k + 1))
plot.rstyle(axt)
fig.set_tight_layout(True)
cumm_coef = 100 * np.cumsum(s) / sum(s)
idx = np.arange(0, M + 1) + 1
plot.f_plot(idx, cumm_coef, "Coefficient Cumulative Percentage")
        plt.xlabel("Index")
        plt.ylabel("Percentage")
plt.show()
mean_f0 = f0.mean(axis=1)
std_f0 = f0.std(axis=1)
mean_fn = fn.mean(axis=1)
std_fn = fn.std(axis=1)
tmp = np.zeros(M)
tmp[1:] = cumtrapz(mqn * np.abs(mqn), time)
fmean = np.mean(f0[1, :]) + tmp
fgam = np.zeros((M, N))
for k in range(0, N):
time0 = (time[-1] - time[0]) * gamf[:, k] + time[0]
fgam[:, k] = np.interp(time0, time, fmean)
var_fgam = fgam.var(axis=1)
orig_var = trapz(std_f0 ** 2, time)
amp_var = trapz(std_fn ** 2, time)
phase_var = trapz(var_fgam, time)
K = np.cov(fn)
U, s, V = svd(K)
align_fPCAresults = collections.namedtuple('align_fPCA', ['fn', 'qn',
'q0', 'mqn', 'gam', 'q_pca',
'f_pca', 'latent', 'coef',
'U', 'orig_var', 'amp_var',
'phase_var', 'cost'])
out = align_fPCAresults(fn, qn, q0, mqn, gamf, q_pca, f_pca, s, c,
U, orig_var, amp_var, phase_var, cost)
return out
def align_fPLS(f, g, time, comps=3, showplot=True, smoothdata=False,
delta=0.01, max_itr=100):
"""
This function aligns a collection of functions while performing
principal least squares
:param f: numpy ndarray of shape (M,N) of N functions with M samples
:param g: numpy ndarray of shape (M,N) of N functions with M samples
:param time: vector of size M describing the sample points
:param comps: number of fPLS components
:param showplot: Shows plots of results using matplotlib (default = T)
:param smooth_data: Smooth the data using a box filter (default = F)
:param delta: gradient step size
:param max_itr: maximum number of iterations
:type smooth_data: bool
:type f: np.ndarray
:type g: np.ndarray
:type time: np.ndarray
:rtype: tuple of numpy array
:return fn: aligned functions - numpy ndarray of shape (M,N) of N
functions with M samples
:return gn: aligned functions - numpy ndarray of shape (M,N) of N
functions with M samples
:return qfn: aligned srvfs - similar structure to fn
:return qgn: aligned srvfs - similar structure to fn
:return qf0: original srvf - similar structure to fn
:return qg0: original srvf - similar structure to fn
:return gam: warping functions - similar structure to fn
:return wqf: srsf principal weight functions
:return wqg: srsf principal weight functions
:return wf: srsf principal weight functions
:return wg: srsf principal weight functions
:return cost: cost function value
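    Example (illustrative sketch; f, g and time are user-supplied arrays as
    described above):
    >>> out = align_fPLS(f, g, time, comps=3, showplot=False)
    >>> wf, wg = out.wf, out.wg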
"""
print ("Initializing...")
binsize = np.diff(time)
binsize = binsize.mean()
eps = np.finfo(np.double).eps
M = f.shape[0]
N = f.shape[1]
f0 = f
g0 = g
if showplot:
plot.f_plot(time, f, title="f Original Data")
plot.f_plot(time, g, title="g Original Data")
# Compute q-function of f and g
f, g1, g2 = uf.gradient_spline(time, f, smoothdata)
qf = g1 / np.sqrt(abs(g1) + eps)
g, g1, g2 = uf.gradient_spline(time, g, smoothdata)
qg = g1 / np.sqrt(abs(g1) + eps)
print("Calculating fPLS weight functions for %d Warped Functions..." % N)
itr = 0
fi = np.zeros((M, N, max_itr + 1))
fi[:, :, itr] = f
gi = np.zeros((M, N, max_itr + 1))
gi[:, :, itr] = g
qfi = np.zeros((M, N, max_itr + 1))
qfi[:, :, itr] = qf
qgi = np.zeros((M, N, max_itr + 1))
qgi[:, :, itr] = qg
wqf1, wqg1, alpha, values, costmp = pls_svd(time, qfi[:, :, itr],
qgi[:, :, itr], 2, 0)
wqf = np.zeros((M, max_itr + 1))
wqf[:, itr] = wqf1[:, 0]
wqg = np.zeros((M, max_itr + 1))
wqg[:, itr] = wqg1[:, 0]
gam = np.zeros((M, N, max_itr + 1))
tmp = np.tile(np.linspace(0, 1, M), (N, 1))
gam[:, :, itr] = tmp.transpose()
wqf_diff = np.zeros(max_itr + 1)
cost = np.zeros(max_itr + 1)
cost_diff = 1
while itr <= max_itr:
# warping
gamtmp = np.ascontiguousarray(gam[:, :, 0])
qftmp = np.ascontiguousarray(qfi[:, :, 0])
qgtmp = np.ascontiguousarray(qgi[:, :, 0])
wqftmp = np.ascontiguousarray(wqf[:, itr])
wqgtmp = np.ascontiguousarray(wqg[:, itr])
gam[:, :, itr + 1] = fpls.fpls_warp(time, gamtmp, qftmp, qgtmp,
wqftmp, wqgtmp, display=0,
delta=delta, tol=1e-6,
max_iter=4000)
for k in range(0, N):
gam_k = gam[:, k, itr + 1]
time0 = (time[-1] - time[0]) * gam_k + time[0]
fi[:, k, itr + 1] = np.interp(time0, time, fi[:, k, 0])
gi[:, k, itr + 1] = np.interp(time0, time, gi[:, k, 0])
qfi[:, k, itr + 1] = uf.warp_q_gamma(time, qfi[:, k, 0], gam_k)
qgi[:, k, itr + 1] = uf.warp_q_gamma(time, qgi[:, k, 0], gam_k)
# PLS
wqfi, wqgi, alpha, values, costmp = pls_svd(time, qfi[:, :, itr + 1],
qgi[:, :, itr + 1], 2, 0)
wqf[:, itr + 1] = wqfi[:, 0]
wqg[:, itr + 1] = wqgi[:, 0]
wqf_diff[itr] = np.sqrt(sum(wqf[:, itr + 1] - wqf[:, itr]) ** 2)
rfi = np.zeros(N)
rgi = np.zeros(N)
for l in range(0, N):
rfi[l] = uf.innerprod_q(time, qfi[:, l, itr + 1], wqf[:, itr + 1])
rgi[l] = uf.innerprod_q(time, qgi[:, l, itr + 1], wqg[:, itr + 1])
cost[itr] = np.cov(rfi, rgi)[1, 0]
if itr > 1:
cost_diff = cost[itr] - cost[itr - 1]
print("Iteration: %d - Diff Value: %f - %f" % (itr + 1, wqf_diff[itr],
cost[itr]))
if wqf_diff[itr] < 1e-1 or abs(cost_diff) < 1e-3:
break
itr += 1
cost = cost[0:(itr + 1)]
# Aligned data & stats
fn = fi[:, :, itr + 1]
gn = gi[:, :, itr + 1]
qfn = qfi[:, :, itr + 1]
qf0 = qfi[:, :, 0]
qgn = qgi[:, :, itr + 1]
qg0 = qgi[:, :, 0]
wqfn, wqgn, alpha, values, costmp = pls_svd(time, qfn, qgn, comps, 0)
wf = np.zeros((M, comps))
wg = np.zeros((M, comps))
for ii in range(0, comps):
wf[:, ii] = cumtrapz(wqfn[:, ii] * np.abs(wqfn[:, ii]), time, initial=0)
wg[:, ii] = cumtrapz(wqgn[:, ii] * np.abs(wqgn[:, ii]), time, initial=0)
gam_f = gam[:, :, itr + 1]
if showplot:
# Align Plots
fig, ax = plot.f_plot(np.arange(0, M) / float(M - 1), gam_f,
title="Warping Functions")
ax.set_aspect('equal')
plot.f_plot(time, fn, title="fn Warped Data")
plot.f_plot(time, gn, title="gn Warped Data")
plot.f_plot(time, wf, title="wf")
plot.f_plot(time, wg, title="wg")
plt.show()
align_fPLSresults = collections.namedtuple('align_fPLS', ['wf', 'wg', 'fn',
'gn', 'qfn', 'qgn', 'qf0',
'qg0', 'wqf', 'wqg', 'gam',
'values', 'cost'])
out = align_fPLSresults(wf, wg, fn, gn, qfn, qgn, qf0, qg0, wqfn,
wqgn, gam_f, values, cost)
return out
| gpl-3.0 |
JasonKessler/scattertext | scattertext/categoryprojector/pairplot.py | 1 | 12541 | import numpy as np
from scipy.stats import rankdata, pearsonr
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.metrics.pairwise import cosine_distances
from scattertext.Scalers import stretch_0_to_1, dense_rank
from scattertext.termcompaction.AssociationCompactor import AssociationCompactor
from scattertext.termscoring.RankDifference import RankDifference
from scattertext.viz import ScatterplotStructure, VizDataAdapter
from scattertext.viz.PairPlotFromScattertextStructure import PairPlotFromScatterplotStructure
from scattertext.ScatterChartExplorer import ScatterChartExplorer
from scattertext.categoryprojector.CategoryProjector import CategoryProjector
from scattertext.viz.BasicHTMLFromScatterplotStructure import D3URLs
from scattertext.Scalers import scale_neg_1_to_1_with_zero_mean
from scattertext.termranking.AbsoluteFrequencyRanker import AbsoluteFrequencyRanker
def produce_category_focused_pairplot(corpus,
category,
category_projector=CategoryProjector(projector=TruncatedSVD(20)),
category_projection=None,
**kwargs):
'''
Produces a pair-plot which is focused on a single category.
:param corpus: TermDocMatrix
:param category: str, name of a category in the corpus
:param category_projector: CategoryProjector, a factor analysis of the category/feature vector
:param category_projection: CategoryProjection, None by default. If present, overrides category projector
:param kwargs: remaining kwargs for produce_pairplot
:return: str, HTML
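    Example (illustrative sketch; `corpus` is an existing TermDocMatrix and
    'democrat' stands in for one of its categories):
    >>> html = produce_category_focused_pairplot(corpus, 'democrat')
    >>> with open('category_pairplot.html', 'w') as out_file:
    ...     out_file.write(html)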
'''
category_num = corpus.get_categories().index(category)
uncorrelated_components_projection = category_projection
if category_projection is None:
if 'use_metadata' in kwargs and kwargs['use_metadata']:
uncorrelated_components_projection = category_projector.project_with_metadata(corpus)
else:
uncorrelated_components_projection = category_projector.project(corpus)
distances = cosine_distances(uncorrelated_components_projection.get_category_embeddings().T)
similarity_to_category_scores = -2 * (rankdata(distances[category_num]) - 0.5)
uncorrelated_components = uncorrelated_components_projection.get_projection()
least_correlated_dimension = min([(np.abs(pearsonr(similarity_to_category_scores,
uncorrelated_components.T[i])[0]), i)]
for i in range(uncorrelated_components.shape[1]))[0][1]
projection_to_plot = np.array([uncorrelated_components.T[least_correlated_dimension],
similarity_to_category_scores]).T
return produce_pairplot(
corpus,
initial_category=category,
category_projection=uncorrelated_components_projection.use_alternate_projection(projection_to_plot),
category_focused=True,
**kwargs
)
def produce_pairplot(corpus,
asian_mode=False,
category_width_in_pixels=500,
category_height_in_pixels=700,
term_width_in_pixels=500,
term_height_in_pixels=700,
terms_to_show=3000,
scaler=scale_neg_1_to_1_with_zero_mean,
term_ranker=AbsoluteFrequencyRanker,
use_metadata=False,
category_projector=CategoryProjector(),
category_projection=None,
topic_model_term_lists=None,
topic_model_preview_size=10,
metadata_descriptions=None,
initial_category=None,
x_dim=0,
y_dim=1,
show_halo=True,
num_terms_in_halo=5,
category_color_func='(function(x) {return "#5555FF"})',
protocol='https',
d3_url_struct=D3URLs(),
category_focused=False,
verbose=False,
use_full_doc=True,
default_to_term_comparison=True,
category_x_label='',
category_y_label='',
category_show_axes_and_cross_hairs=False,
highlight_selected_category=True,
term_x_label=None, # used if default_to_term_comparison
term_y_label=None, # used if default_to_term_comparison
wordfish_style=False,
**kwargs):
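    '''
    Produces a pair-plot: a category-projection scatterplot linked to a term (or
    metadata) scatterplot, returned as a single HTML string.
    :param corpus: TermDocMatrix
    :return: str, HTML
    '''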
if category_projection is None:
if use_metadata:
category_projection = category_projector.project_with_metadata(corpus, x_dim=x_dim, y_dim=y_dim)
else:
category_projection = category_projector.project(corpus, x_dim=x_dim, y_dim=y_dim)
if initial_category is None:
initial_category = corpus.get_categories()[0]
category_scatter_chart_explorer = _get_category_scatter_chart_explorer(
category_projection, scaler, term_ranker, verbose
)
category_scatter_chart_data = category_scatter_chart_explorer.to_dict(
category=initial_category,
max_docs_per_category=0,
)
category_tooltip_func = '(function(d) {return d.term})'
#initial_category_idx = corpus.get_categories().index(initial_category)
term_plot_change_func = _get_term_plot_change_js_func(wordfish_style, category_focused, initial_category)
category_scatterplot_structure = ScatterplotStructure(
VizDataAdapter(category_scatter_chart_data),
width_in_pixels=category_width_in_pixels,
height_in_pixels=category_height_in_pixels,
asian_mode=asian_mode,
use_non_text_features=True,
show_characteristic=False,
x_label=category_x_label,
y_label=category_y_label,
show_axes_and_cross_hairs=category_show_axes_and_cross_hairs,
full_data='getCategoryDataAndInfo()',
show_top_terms=False,
get_tooltip_content=category_tooltip_func,
color_func=category_color_func,
show_axes=False,
horizontal_line_y_position=0,
vertical_line_x_position=0,
unified_context=True,
show_category_headings=False,
show_cross_axes=True,
div_name='cat-plot',
alternative_term_func=term_plot_change_func,
highlight_selected_category=highlight_selected_category
)
compacted_corpus = AssociationCompactor(terms_to_show,
use_non_text_features=use_metadata).compact(corpus)
terms_to_hide = set(corpus.get_terms()) - set(compacted_corpus.get_terms())
if verbose:
print('num terms to hide', len(terms_to_hide))
print('num terms to show', compacted_corpus.get_num_terms())
term_scatter_chart_explorer = ScatterChartExplorer(
category_projection.get_corpus(),
minimum_term_frequency=0,
minimum_not_category_term_frequency=0,
pmi_threshold_coefficient=0,
term_ranker=term_ranker,
use_non_text_features=use_metadata,
score_transform=stretch_0_to_1,
verbose=verbose
).hide_terms(terms_to_hide)
if default_to_term_comparison:
if topic_model_term_lists is not None:
term_scatter_chart_explorer.inject_metadata_term_lists(topic_model_term_lists)
if metadata_descriptions is not None:
term_scatter_chart_explorer.inject_metadata_descriptions(metadata_descriptions)
if use_metadata:
tdf = corpus.get_metadata_freq_df('')
else:
tdf = corpus.get_term_freq_df('')
scores = RankDifference().get_scores(
tdf[initial_category], tdf[[c for c in corpus.get_categories() if c != initial_category]].sum(axis=1)
)
term_scatter_chart_data = term_scatter_chart_explorer.to_dict(
category=initial_category,
scores=scores,
include_term_category_counts=True,
transform=dense_rank,
**kwargs
)
        y_label = initial_category
        x_label = 'Not ' + initial_category
color_func = None
show_top_terms = True
show_axes = False
else:
term_projection = category_projection.get_term_projection()
original_x = term_projection['x']
original_y = term_projection['y']
x_coords = scaler(term_projection['x'])
y_coords = scaler(term_projection['y'])
x_label = term_x_label if term_x_label is not None else ''
y_label = term_y_label if term_y_label is not None else ''
show_axes = True
horizontal_line_y_position = 0
vertical_line_x_position = 0
term_scatter_chart_explorer.inject_coordinates(x_coords,
y_coords,
original_x=original_x,
original_y=original_y)
if topic_model_term_lists is not None:
term_scatter_chart_explorer.inject_metadata_term_lists(topic_model_term_lists)
if metadata_descriptions is not None:
term_scatter_chart_explorer.inject_metadata_descriptions(metadata_descriptions)
term_scatter_chart_data = term_scatter_chart_explorer.to_dict(
category=initial_category,
category_name=initial_category,
include_term_category_counts=True,
# transform=dense_rank,
)
color_func = '(function(x) {return "#5555FF"})'
show_top_terms = False
term_scatterplot_structure = ScatterplotStructure(
VizDataAdapter(term_scatter_chart_data),
width_in_pixels=term_width_in_pixels,
height_in_pixels=term_height_in_pixels,
use_full_doc=use_metadata or use_full_doc, asian_mode=asian_mode,
use_non_text_features=use_metadata, show_characteristic=False,
x_label=x_label,
y_label=y_label,
full_data='getTermDataAndInfo()',
show_top_terms=show_top_terms,
get_tooltip_content=None,
color_func=color_func,
# horizontal_line_y_position=0,
# vertical_line_x_position=0,
show_axes=show_axes,
topic_model_preview_size=topic_model_preview_size,
show_category_headings=False,
div_name='d3-div-1',
unified_context=True,
highlight_selected_category=highlight_selected_category
)
return PairPlotFromScatterplotStructure(
category_scatterplot_structure,
term_scatterplot_structure,
category_projection,
category_width_in_pixels,
category_height_in_pixels,
num_terms=num_terms_in_halo,
show_halo=show_halo,
d3_url_struct=d3_url_struct,
x_dim=x_dim,
y_dim=y_dim,
protocol=protocol
).to_html()
def _get_category_scatter_chart_explorer(category_projection, scaler, term_ranker, verbose):
category_scatter_chart_explorer = ScatterChartExplorer(
category_projection.get_corpus(),
minimum_term_frequency=0,
minimum_not_category_term_frequency=0,
pmi_threshold_coefficient=0,
filter_unigrams=False,
jitter=0,
max_terms=None,
# term_ranker=term_ranker,
use_non_text_features=True,
term_significance=None,
terms_to_include=None,
verbose=verbose
)
proj_df = category_projection.get_pandas_projection()
category_scatter_chart_explorer.inject_coordinates(
x_coords=scaler(proj_df['x']),
y_coords=scaler(proj_df['y']),
original_x=proj_df['x'],
original_y=proj_df['y']
)
return category_scatter_chart_explorer
def _get_term_plot_change_js_func(wordfish_style, category_focused, initial_category):
if wordfish_style:
return '(function (termInfo) {termPlotInterface.yAxisLogCounts(termInfo.term); return false;})'
if category_focused:
return '(function (termInfo) {termPlotInterface.drawCategoryAssociation("%s", termInfo.term); return false;})' \
% (initial_category.replace('"', '\\"'))
#return '(function (termInfo) {termPlotInterface.drawCategoryAssociation(termInfo.i); return false;})'
return '(function (termInfo) {termPlotInterface.drawCategoryAssociation(termInfo.term); return false;})'
| apache-2.0 |
zaxtax/scikit-learn | sklearn/linear_model/tests/test_base.py | 19 | 12955 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data
from sklearn.linear_model.base import sparse_center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
reg.fit((X), Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
rng = np.random.RandomState(0)
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
| bsd-3-clause |
HullUni-bioinformatics/ReproPhyloDockerfile | reprophylo.py | 2 | 281415 | reprophyloversion=1.0
############################################################################################
if False:
"""
ReproPhylo version 1
General purpose phylogenetics package for reproducible and experimental analysis
    Amir Szitenberg
[email protected]
[email protected]
David H Lunt
[email protected]
EvoHull.org
University of Hull
Developed with:
CPython 2.7.6
IPython 1.2.1
ete2 2.2rev1056
biopython 1.64
dendropy 3.12.0
cloud 2.8.5
numpy 1.8.2
matplotlib 1.3.1
pandas
RAxML 8
Phylobayes 3
Trimal 1
Muscle
Mafft 7
Pal2nal 14
"""
##############################################################################################
from Bio import SeqIO
import os, csv, sys, dendropy, re, time, random, glob, platform, warnings, rpgit, ast, gb_syn, css
import HTML, inspect, shutil
import subprocess as sub
from Bio.Seq import Seq
import numpy as np
import matplotlib.pyplot as plt
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation, CompoundLocation
from Bio.Align.Applications import MafftCommandline, MuscleCommandline
from StringIO import StringIO
from Bio import AlignIO
from Bio.Phylo.Applications import RaxmlCommandline
from Bio.Align import MultipleSeqAlignment
from Bio.SeqUtils import GC
from ete2 import *
from collections import Counter
#import pandas as pd
import math
import __builtin__
##############################################################################################
class Locus:
##############################################################################################
""" Configure the loci stored in the ReproPhylo Project.
>>> locus = Locus('dna', 'CDS', 'coi', ['cox1','COX1','coi','COI','CoI'])
>>> print(locus)
Locus(char_type=dna, feature_type=CDS, name=coi, aliases=cox1; COX1; coi; COI; CoI)
"""
char_type = 'NotSet'
feature_type = 'NotSet'
name = 'NotSet'
aliases = []
def __init__(self, char_type=char_type, feature_type=feature_type,
name=name, aliases=aliases):
self.char_type = char_type
self.feature_type = feature_type
self.name = name
self.aliases = aliases
valid = ['dna','prot']
if not self.char_type in valid:
raise ValueError('self.char_type should be \'dna\' or \'prot\'')
if not type(self.feature_type) is str:
raise ValueError('self.feature_type should be a string')
if not type(self.name) is str:
raise ValueError('self.name should be a string')
if not type(self.aliases) is list:
raise ValueError('self.aliases should be a list')
else:
for a in self.aliases:
if not type(a) is str:
raise ValueError('aliases in self.aliases have to be strings')
def __str__(self):
aliases_str = ('; ').join(self.aliases)
return ('Locus(char_type='+self.char_type+', feature_type='+self.feature_type+
', name='+self.name+', aliases='+aliases_str+')')
##############################################################################################
class Concatenation:
##############################################################################################
"""This class is used to configure concatenations given loci and rules.
>>> coi = Locus('dna', 'CDS', 'coi', ['cox1','COX1','coi','COI','CoI'])
>>> ssu = Locus('dna', 'rRNA', '18S', ['18S rRNA','SSU rRNA'])
>>> bssu = Locus('dna', 'rRNA', '16S', ['16S rRNA'])
>>> lsu = Locus('dna', 'rRNA', '28S', ['28S rRNA', 'LSU rRNA'])
>>> alg11 = Locus('dna', 'CDS', 'ALG11', ['ALG11'])
>>> loci = [coi, ssu, bssu, lsu, alg11]
>>> concatenation = Concatenation(name='combined', loci=loci,
... otu_meta='OTU_name',
... otu_must_have_all_of=['coi'],
... otu_must_have_one_of =[['16S','28S'],['ALG11','18S']],
... define_trimmed_alns=["MuscleDefaults@dummyTrimMethod"])
>>> print(str(concatenation))
Concatenation named combined, with loci coi,18S,16S,28S,ALG11,
of which coi must exist for all species
and at least one of each group of [ 16S 28S ][ ALG11 18S ] is represented.
Alignments with the following names: MuscleDefaults@dummyTrimMethod are prefered
"""
otu_must_have_all_of = []
otu_must_have_one_of = 'any'
    define_trimmed_alns = [] #should be Locus_name@Alignment_method_name@Trimming_method_name
feature_id_dict = {}
def __init__(self,
name,
loci,
otu_meta,
otu_must_have_all_of = otu_must_have_all_of,
otu_must_have_one_of = otu_must_have_one_of,
define_trimmed_alns = define_trimmed_alns):
self.name = name
self.loci = loci
self.otu_meta = otu_meta
self.otu_must_have_all_of = otu_must_have_all_of
self.otu_must_have_one_of = otu_must_have_one_of
if isinstance(otu_must_have_all_of,str):
raise IOError('The keyword \'otu_must_have_all_of\' has to be a list')
if isinstance(otu_must_have_one_of[0],str) and not otu_must_have_one_of == 'any':
raise IOError('The keyword \'otu_must_have_one_of\' has to be a list of lists')
if self.otu_must_have_one_of == 'any':
self.otu_must_have_one_of = [[l.name for l in self.loci]]
self.feature_id_dict = {} # Will hold the feature_id list for each otu
self.define_trimmed_alns = define_trimmed_alns # To choose between alternative
# alignments of the same locus
self.used_trimmed_alns = {} #To hold the alignment chosen for each locus
# Validate loci list
seen = []
for locus in loci:
if not isinstance(locus, Locus):
raise TypeError("Expecting Locus object in loci list")
if locus.name in seen:
raise NameError('Locus ' + locus.name + ' appears more than once in self.loci')
else:
seen.append(locus.name)
def __str__(self):
loci_names = [i.name for i in self.loci]
loci_string = ''
for l in loci_names:
loci_string += l+','
loci_string = loci_string[:-1]
must_have = ''
for i in self.otu_must_have_all_of:
must_have += i+','
must_have = must_have[:-1]
trimmed_alignmnets_spec = ''
one_of = ''
for i in self.otu_must_have_one_of:
one_of += '[ '
for j in i:
one_of += j+' '
one_of += ']'
        if len(self.define_trimmed_alns) > 0:
for i in self.define_trimmed_alns:
trimmed_alignmnets_spec += i
return ("Concatenation named %s, with loci %s,\n"
"of which %s must exist for all species\n"
"and at least one of each group of %s is represented.\n"
"Alignments with the following names: %s are prefered"
% (self.name, loci_string, must_have, one_of, trimmed_alignmnets_spec))
##############################################################################################
if False:
"""
Reprophylo Project Utilities
    Used by the Project class but not defined as methods of the class
"""
##############################################################################################
## Git management
__builtin__.git = False
# git log template
gitline = "<<<<\n%s\nSTDOUT:\n%s\nSTDERR:%s\n>>>>\n"
def undate_git_log(pj, out, err):
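    """Appends a timestamped entry with the given stdout and stderr to pj.git_log."""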
if not err:
err = 'None'
if not out:
out = 'None'
pj.git_log += gitline%(str(time.asctime()),str(out), str(err))
def start_git(pj):
__builtin__.git = True # flag it on
cwd = os.getcwd()
if os.path.isdir(cwd + '/.git'):
# a repo exists, check it belongs to this project by checking the description
try:
assert open(cwd + '/.git/description').read().strip().rstrip() == pj.pickle_name.strip().rstrip()
warnings.warn('Git repository exists for this Project')
except:
raise RuntimeError('The Git repository in the CWD does not belong to this project. Either the pickle'+
                               ' moved, or this is a pre-existing repo. Try one of the following: delete the local '+
                               ' .git dir if you don\'t need it, move the pickle and the notebook to a new work dir,'+
                               ' or, if possible, move them back to their original location. You may also disable Git'+
                               ' by calling stop_git().')
else:
        # start a repo
out, err = rpgit.gitInit()
undate_git_log(pj, out, err)
# write the pickle name as the repo description
hndl = open(cwd + '/.git/description', 'wt')
hndl.write(pj.pickle_name.strip().rstrip())
hndl.close()
warnings.warn('The new repository is called %s.'%open(cwd + '/.git/description', 'r').read().rstrip())
# list scripts and notebooks
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
# git add scripts and notebooks
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(pj, out, err)
# commit scripts and notebooks
comment = "%i script file(s) from %s" % (len(matches), time.asctime())
out, err = rpgit.gitCommit(comment)
undate_git_log(pj, out, err)
def stop_git():
    __builtin__.git = False # flag it off
cwd = os.getcwd()
# list, git add and git commit scripts and notebooks
# list
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
# add
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(pj, out, err)
# commit
comment = "%i script file(s) from %s" % (len(matches), time.asctime())
out, err = rpgit.gitCommit(comment)
undate_git_log(pj, out, err)
## end git management
def platform_report():
"""
Prints machine specs, os specs and dependencies at time of execution
>>> isinstance(platform_report(), list)
True
"""
import pkg_resources
modules = [] # and their versions
for i in ('ete2','biopython','dendropy','cloud'):
try:
modules.append(i+' version: '+
pkg_resources.get_distribution(i).version)
except:
pass
modules.append('reprophylo version %s'%str(reprophyloversion))
return(['Platform: '+platform.platform(aliased=0, terse=0),
'Processor: '+platform.processor(),
'Python build: '+platform.python_build()[0] + platform.python_build()[1],
'Python compiler: '+platform.python_compiler(),
'Python implementation: ' +platform.python_implementation(),
'Python version: ' + platform.python_version()]+
modules+
['User: ' +platform.uname()[1]])
def write_alns(pj, format = 'fasta'):
"""
Writes untrimmed sequence alignment files that are in pj in a biopython format
"""
if len(pj.alignments.keys()) == 0:
raise IOError('Align the records first')
else:
for key in pj.alignments:
AlignIO.write(pj.alignments[key], key+'_aln.'+format, format)
def keep_feature(feature, loci):
""" Returns true if a feature's type is in one of the loci and if the gene
or product qualifiers is in the aliases of one of the loci, for data collection
from a genbank or embl file
# making a dummy feature
>>> coi = Locus('dna','CDS','coi', ['cox1','COX1','coi','COI','CoI'])
>>> location = FeatureLocation(1,100)
>>> feature = SeqFeature()
>>> feature.location = location
>>> feature.type = 'CDS'
>>> feature.qualifiers['gene'] = ['CoI']
# testing if fits any of the Project Locus objects
>>> a = keep_feature(feature, [coi])
>>> print(a)
True"""
keep = 0
for g in loci:
if not g.name in g.aliases:
g.aliases.append(g.name)
if feature.type == 'source':
keep = 1
elif feature.type == g.feature_type:
qual = None
if 'gene' in feature.qualifiers.keys():
qual = 'gene'
elif 'product' in feature.qualifiers.keys():
qual = 'product'
if qual and feature.qualifiers[qual][0] in g.aliases:
keep = 1
if keep == 1:
return True
else:
return False
def dwindle_record(record, loci):
"""
Retains only features that are called by Locus objects and records with features that are
called by Locus objects
# Making a dummy locus
>>> coi = Locus('dna','CDS','coi', ['cox1','COX1','coi','COI','CoI'])
# Making a dummy record with a feature that fits a Locus object (kept_feature)
# and a feature that does not (dwindled_feature)
>>> location = FeatureLocation(1,100)
>>> kept_feature = SeqFeature()
>>> kept_feature.location = location
>>> kept_feature.type = 'CDS'
>>> kept_feature.qualifiers['gene'] = ['CoI']
>>> dwindled_feature = SeqFeature()
>>> dwindled_feature.location = location
>>> dwindled_feature.type = 'rRNA'
>>> dwindled_feature.qualifiers['gene'] = ['LSU']
>>> s = 'atgc'*1000
>>> record = SeqRecord(seq=Seq(s, IUPAC.ambiguous_dna), id='1', description='spam')
>>> record.features.append(kept_feature)
>>> record.features.append(dwindled_feature)
>>> print(len(record.features))
2
# Dwindling the record
>>> a = dwindle_record(record, [coi])
>>> print(len(record.features))
1
"""
dwindled_features = []
feature_count = 0
for feature in record.features:
if keep_feature(feature, loci)== True:
# determine feature id
if feature.type == 'source' and not 'feature_id' in feature.qualifiers.keys():
feature.qualifiers['feature_id'] = [record.id + '_source']
elif not 'feature_id' in feature.qualifiers.keys():
feature.qualifiers['feature_id'] = [record.id + '_f' + str(feature_count)]
feature_count += 1
# determine prop ambiguity and GC content
if not feature.type == 'source':
feature_seq = feature.extract(record.seq)
degen = len(feature_seq)
for i in ['A','T','G','C','U','a','t','g','c','u']:
degen -= feature_seq.count(i)
feature.qualifiers['GC_content'] = [str(GC(feature_seq))]
feature.qualifiers['nuc_degen_prop'] = [str(float(degen)/len(feature_seq))]
if 'translation' in feature.qualifiers.keys():
transl = feature.qualifiers['translation'][0]
degen = 0
for i in ['B', 'X', 'Z', 'b', 'x', 'z']:
degen += transl.count(i)
feature.qualifiers['prot_degen_prop'] = [str(float(degen)/len(transl))]
dwindled_features.append(feature)
record.features = dwindled_features
return record
def is_embl_or_gb(input_filename):
suffixes = ['.gb','.embl']
gb = False
for s in suffixes:
if s in input_filename:
gb = True
return gb
def parse_input(input_filename, fmt):
return SeqIO.parse(input_filename, fmt)
def list_to_string(List):
"""
Handles list printing as a nice string in the pj.write(format="csv") method
>>> L = ['a','b','b']
>>> print(list_to_string(L))
a;b;b
"""
string = ''
for i in List:
if type(i) is str and '\n' in i:
string += lines_to_line(i).rstrip()+';'
else:
string += str(i)+';'
return string[:-1]
def lines_to_line(lines):
"""
Replaces newline with space in the pj.write(format="csv") method
"""
lines = lines.split('\n')
return (' ').join(lines)
def type_to_single_line_str(var):
"""
Returns any type as a one line string for the pj.write(format="csv") method
"""
if type(var) is str and '\n' in var:
return lines_to_line(var)
elif type(var) is str or type(var) is int or type(var) is float:
return str(var)
elif type(var) is list and len(var) == 1:
return str(var[0])
elif type(var) is list and len(var) > 0:
return list_to_string(var)
else:
return var
def get_qualifiers_dictionary(project, feature_id):
"""
Takes sequence record annotation, source qualifiers and feature qualifiers and puts them
in a flat dictionary
This is being replaced by __get_qualifiers_dictionary__ which deals with the records as a dict
and is much faster. Eventually, records will be handled as a dict throughout, instead of as
a list.
# Making a dummy locus
>>> coi = Locus('dna','CDS','coi', ['cox1','COX1','coi','COI','CoI'])
# Making a dummy Project
>>> pj = Project([coi], git=False)
# making a dummy record
>>> s = 'atgc'*1000
>>> location = FeatureLocation(1,100)
>>> feature = SeqFeature()
>>> feature.location = location
>>> feature.type = 'CDS'
>>> feature.qualifiers['gene'] = ['CoI']
>>> feature.qualifiers['feature_id'] = ['1_f0']
>>> source = SeqFeature()
>>> source.location = FeatureLocation(0,3999)
>>> source.type = 'source'
>>> source.qualifiers['organism'] = ['Tetillda radiata']
>>> record = SeqRecord(seq=Seq(s, IUPAC.ambiguous_dna), id='1', description='spam')
>>> record.features.append(feature)
>>> record.features.append(source)
>>> record.annotations["evidence"] = 'made up'
>>> pj.records = [record]
# executing get_qualifiers_dictionary()
>>> qual_dict = get_qualifiers_dictionary(pj, '1_f0')
>>> qual_items = qual_dict.items()
>>> qual_items.sort(key = lambda i: i[0])
>>> for key, val in qual_items: print(key.ljust(20,' ') + val.ljust(20,' '))
annotation_evidence made up
feature_id 1_f0
gene CoI
record_id 1
source_organism Tetillda radiata
"""
if type(feature_id) is list and len(feature_id) > 1:
raise IOError('get_qualifiers_dictionary takes one feature_id at a time')
if type(feature_id) is list:
feature_id = feature_id[0]
record_id = feature_id.rpartition('_')[0]
qualifiers_dictionary={'record_id': record_id}
for record in project.records:
if record.id in feature_id:
for annotation in record.annotations.keys():
qualifiers_dictionary['annotation_'+annotation]=record.annotations[annotation]
for feature in record.features:
if feature.type == 'source':
for qualifier in feature.qualifiers.keys():
qualifiers_dictionary['source_'+qualifier]=feature.qualifiers[qualifier][0]
elif feature.qualifiers['feature_id'][0] == feature_id:
for qualifier in feature.qualifiers.keys():
qualifiers_dictionary[qualifier]=feature.qualifiers[qualifier][0]
return qualifiers_dictionary
def __get_qualifiers_dictionary__(project, feature_id):
"""
This will replace the public version. It uses the Project._records_dict to pull
the record using the record id, instead of iterating Project.records, which is very slow.
It requires Project.__records_list_to_dict__() to execute beforehand.
"""
if type(feature_id) is list and len(feature_id) > 1:
raise IOError('get_qualifiers_dictionary takes one feature_id at a time')
if type(feature_id) is list:
feature_id = feature_id[0]
record_id = feature_id.rpartition('_')[0]
record = project._records_dict[record_id]
qualifiers_dictionary={'record_id': record_id}
for annotation in record.annotations.keys():
qualifiers_dictionary['annotation_'+annotation]=record.annotations[annotation]
for feature in record.features:
if feature.type == 'source':
for qualifier in feature.qualifiers.keys():
qualifiers_dictionary['source_'+qualifier]=feature.qualifiers[qualifier][0]
elif feature.qualifiers['feature_id'][0] == feature_id:
for qualifier in feature.qualifiers.keys():
qualifiers_dictionary[qualifier]=feature.qualifiers[qualifier][0]
return qualifiers_dictionary
def seq_format_from_suffix(suffix):
"""
Guesses input format from suffix
>>> print(seq_format_from_suffix('gb'))
genbank
"""
suffixes = {'fasta': ['fas','fasta','fa','fna'],
'genbank': ['gb','genbank'],
'embl': ['embl']}
found = False
for key in suffixes.keys():
if suffix in suffixes[key]:
found = True
return key
if not found:
raise RuntimeError(suffix+' is not a recognised suffix of an unaligned sequence file')
def read_feature_quals_from_tab_csv(csv_filename):
"""
This is used to update feature qualifiers from a tab delimited file
"""
import re
header = open(csv_filename, 'r').readlines()[0].rstrip().split('\t')
feature_id_col = header.index('feature_id')
taxonomy_col = header.index('taxonomy')
seq_col = header.index('seq')
translation_col = None
if 'translation' in header:
translation_col = header.index('translation')
csv_info = {}
for line in [l.rstrip().split('\t') for l in open(csv_filename, 'r').readlines()[1:]]:
if not line[0] in csv_info.keys():
csv_info[line[0]] = {'source':{},
'taxonomy':[],
'features':{}
}
if csv_info[line[0]]['taxonomy'] == []:
csv_info[line[0]]['taxonomy'] = line[taxonomy_col].split(';')
csv_info[line[0]]['features'][line[feature_id_col]] = {}
get_source = False
if csv_info[line[0]]['source'] == {}:
get_source = True
for i in range(len(header)):
if get_source and 'source:_' in header[i]:
qual_name = re.sub('source:_','',header[i])
if not line[i] == 'null' and not line[i] == '' and line[i]:
csv_info[line[0]]['source'][qual_name] = line[i].split(';')
elif (not 'source:_' in header[i] and not line[i] == 'null' and not line[i] == '' and line[i] and
not i in [seq_col, translation_col, taxonomy_col, feature_id_col]):
csv_info[line[0]]['features'][line[feature_id_col]][header[i]] = line[i].split(';')
return csv_info
## Alignment statistics
def count_positions(aln_column):
counts = {}
for i in aln_column:
if i in counts.keys():
counts[i] += 1
else:
counts[i] = 1
return counts
def global_aln_stats(aln_obj):
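    """
    Returns (mean per-column gap proportion, number of variable columns,
    number of parsimony-informative columns) for an alignment object.
    """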
total_gaps = 0
prop_list = []
non_uniform_count = aln_obj.get_alignment_length()
parsimony_informative = 0
for i in range(aln_obj.get_alignment_length()):
total_gaps += aln_obj[:, i].count('-')
prop_list.append(aln_obj[:, i].count('-')/float(len(aln_obj)))
if len(count_positions(aln_obj[:, i]).keys()) == 1:
non_uniform_count -= 1
elif (len(count_positions(aln_obj[:, i]).keys()) == 2 and
'-' in count_positions(aln_obj[:, i]).keys()):
non_uniform_count -= 1
if len([p for p in count_positions(aln_obj[:, i]).keys() if (p != '-' and count_positions(aln_obj[:, i])[p] > 1)]) > 1:
parsimony_informative += 1
mean_gap_prop = sum(prop_list)/aln_obj.get_alignment_length()
return (mean_gap_prop, non_uniform_count, parsimony_informative)
def count_undetermined_lines(aln_obj, cutoff=0):
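    """
    Returns the number and ids of alignment rows that are (almost) entirely gaps,
    i.e. rows with at least alignment_length - cutoff gap characters.
    """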
count = 0
ids = []
    if aln_obj.get_alignment_length() <= cutoff:
        raise RuntimeWarning('The cutoff to exclude a sequence is as long as or longer than the alignment')
    elif aln_obj.get_alignment_length() < cutoff*2:
        warnings.warn('The cutoff to exclude a sequence is more than half of the alignment length')
for seq in aln_obj:
if str(seq.seq).count('-') >= aln_obj.get_alignment_length()-cutoff:
count += 1
ids.append(seq.id)
return count, ids
def count_collapsed_aln_seqs(aln_obj):
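    """
    Counts unique alignment rows, treating a sequence that is identical to,
    contained in, or containing a previously seen sequence as a duplicate.
    """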
count = 1
seen_seqs = [str(aln_obj[0].seq)]
for seq in aln_obj[1:]:
str_seq = str(seq.seq)
if len([s for s in seen_seqs if (str_seq in s or s in str_seq or s == str_seq)]) == 0:
count += 1
seen_seqs.append(str_seq)
return count
def aln_summary(aln_obj, cutoff=0):
lines = ["Alignment length: %i" % aln_obj.get_alignment_length(),
"Number of rows: %i" % len(aln_obj),
"Unique sequences: %i"%count_collapsed_aln_seqs(aln_obj),
"Average gap prop.: %f\nVariable columns: %i\nParsimony informative: %i"
%global_aln_stats(aln_obj),
"Undetermined sequences: %i"%(count_undetermined_lines(aln_obj, cutoff=cutoff)[0]),
"Undetermined sequence cutoff: %i"%cutoff
]
return [lines, len(aln_obj), count_undetermined_lines(aln_obj, cutoff=cutoff), count_collapsed_aln_seqs(aln_obj)]
##
def loci_list_from_csv(loci):
"""
Parse the loci csv file given to Project
"""
# verify format
if any(len(line.split(',')) >= 4 for line in open(loci, 'r').readlines()):
pass
else:
raise IOError("File %s has no valid loci of format char_type,feature_type,name,aliases"%loci)
loci_dict = {}
loci_list = []
for line in [line.rstrip() for line in open(loci, 'r').readlines() if len(line.rstrip()) > 0]:
# verify format
if len(line.split(',')) < 4:
raise IOError("The line %s in file %s is missing arguments. Needs at least char_type,feature_type,name,aliases"%
(line.rstrip(), loci))
# look for synonyms
else:
group = None
try:
group = int(line.rstrip().split(',')[-1])
except:
pass
if group:
locus_exists = False
for name in loci_dict:
if 'group' in loci_dict[name].keys() and loci_dict[name]['group'] == group:
loci_dict[name]['aliases'] += line.split(',')[3:-1]
locus_exists = True
if not locus_exists:
loci_dict[line.split(',')[2]] = {'group': int(line.rstrip().split(',')[-1]),
'char_type': line.split(',')[0],
'feature_type': line.split(',')[1],
'aliases': line.split(',')[3:-1]
}
else:
loci_dict[line.split(',')[2]] = {'group': None,
'char_type': line.split(',')[0],
'feature_type': line.split(',')[1],
'aliases': line.split(',')[3:]
}
for name in loci_dict:
loci_list.append(Locus(loci_dict[name]['char_type'],
loci_dict[name]['feature_type'],
name,
loci_dict[name]['aliases']))
return loci_list
def parse_paup_charset(nexus_filename):
"""
Takes a nexus file with PAUP style charset commands.
Returns a dictionary with partition names as keys and a list of
integers representing the start and end of the partition as a value.
Position count starts from 0.
Handles paup commands of the following format:
CHARSET locus_name=1-129;
or
charset locus_name = 1 - 129 ;
"""
try:
AlignIO.read(nexus_filename, 'nexus')
except:
n = len(list(AlignIO.parse(nexus_filename, 'nexus')))
        raise IOError('Cannot handle more than one matrix in %s. Got %i matrices'%
(nexus_filename, n))
charsets = {}
charset_lines = [l for l in open(nexus_filename,'r').readlines() if
(l.startswith('CHARSET') or l.startswith('charset'))]
if len(charset_lines) == 0:
raise IOError("There are no CHARSET commands in %s"%nexus_filename)
for line in charset_lines:
try:
info = line.split()[1].split(';')[0]
locus_name, range = info.split('=')
locus_name = locus_name.strip().rstrip()
start = int(range.split('-')[0].strip().rstrip())-1
end = int(range.split('-')[1].strip().rstrip())-1
charsets[locus_name] = [start,end]
except:
raise IOError('Expects "charset set_name = start_int - end_int;"'+
' (case insensitive, spaces around the "=" or "-" not mandatory). Got %s'%line)
return charsets
def pj_from_nexus_w_charset(nexus_filename, output_dir, char_type,
feature_type, project=False, pickle=False, git=False):
"""
Takes a nexus file with PAUP style charset commands as input.
Creates a separate fasta file for each alignment partition
Returns a list of fasta filenames and a list of Locus objects
If project==True, returns a Project instance with the loci, alignments and records instead
"""
from reprophylo import Locus
from Bio import AlignIO
charsets = parse_paup_charset(nexus_filename)
alignment = AlignIO.read(nexus_filename, 'nexus')
filenames = []
loci_list = []
for locus_name in charsets:
s = charsets[locus_name][0]
e = charsets[locus_name][1]
outname = "%s/%s.fasta"%(output_dir,locus_name)
AlignIO.write(alignment[:, s:e], outname, 'fasta')
filenames.append(outname)
loci_list.append(Locus(char_type, feature_type, locus_name, [locus_name]))
if project:
from reprophylo import Project
pj = Project(loci_list, pickle=pickle, git=git)
i=1
for f in filenames:
locus_name = f.split('/')[-1].split('.')[0]
print '%i/%i reading %s'%(i,len(filenames), locus_name)
i += 1
pj.read_alignment(f, char_type, feature_type, locus_name)
return pj
else:
return filenames, loci_list
##############################################################################################
class Project:
##############################################################################################
"""
The Project class contians all the data and has methods to analyze it. It allows for
experimental analysis by running alternative analyses and formally comparing the
outputs. The pickle_pj() function allows to pickle the project, including the data,
intermediates and results, as well as a description of the methods.It allows for a rerun
of the whole analysis as is, as well as for a reconfiguration of the analysis or addition
of data. If git is installed, it can be called by 'import rpgit'. As a result, git can be
initiated using start_git(). A git repository will be created in the CWD, if it doesn't already exist.
Input datasets, .py, .ipynb and .pkl files in the CWD will be version controlled.
Version control can be paused in the middle of the script
by calling stop_git() and restarted by calling start_git() again.
"""
def __init__(self, loci, pickle=None, git=True):
"""
# making dummy loci
>>> coi = Locus('dna','CDS','coi',['COX1','cox1'])
>>> ssu = Locus('dna','rRNA','18S',['18S','SSU'])
# Making a Project object
>>> pj = Project([coi,ssu], git=False)
>>> print(str(pj))
Project object with the loci coi,18S,
"""
self.records = []
self._records_dict = {}
self.starttime = str(time.asctime())
self.user = None
if os.path.isfile('USER'):
self.user = []
for line in open('USER','r').readlines():
key, arg = line.rstrip().split('=')
self.user.append([key, arg])
self.loci = loci
self.records_by_locus = {}
self.concatenations = []
self.alignments = {}
self.trimmed_alignments = {}
self.trees = {}
self.used_methods = {}
self.sets = {}
self.git_log = ''
self.pickle_name=pickle
if self.pickle_name and os.path.exists(self.pickle_name):
raise IOError('Pickle %s exists. If you want to keep using it do pj=unpickle_pj(\'%s\') instead.'%(self.pickle_name,self.pickle_name))
if git and not self.pickle_name:
raise IOError('Must have pickle to run Git. Either specify pickle or git=False')
elif git:
start_git(self)
self.defaults = {'raxmlHPC': programspath+'raxmlHPC-PTHREADS-SSE3',
'mafft': 'mafft',
'muscle': programspath+'muscle',
'trimal': programspath+'trimal',
'pb': programspath+'pb',
'bpcomp': programspath+'bpcomp',
'tracecomp': programspath+'tracecomp',
'fasttree': programspath+'FastTreeMP',
'pal2nal': programspath+'pal2nal.pl',
# PROGRAM PLUG
# 'program name': programpath+'the basic command'
}
seen = []
if isinstance(loci,list):
for locus in loci:
if not isinstance(locus, Locus):
raise TypeError("Expecting Locus object in loci list. "+locus+
" not a Locus object")
if locus.name in seen:
                    raise NameError('Locus ' + locus.name + ' appears more than once in self.loci')
else:
seen.append(locus.name)
elif isinstance(loci,str):
self.loci = loci_list_from_csv(loci)
#print 'Read the following loci from file %s:'%loci
#for l in self.loci:
#print str(l)
self.aln_summaries = []
if self.pickle_name:
pickle_pj(self, self.pickle_name, track=False)
if __builtin__.git and self.pickle_name:
import rpgit
comment = "%s from %s" % (str(self), time.asctime())
out, err = rpgit.gitAdd(self.pickle_name)
undate_git_log(self, out, err)
cwd = os.getcwd()
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(self, out, err)
out, err = rpgit.gitCommit(comment)
undate_git_log(self, out, err)
def __str__(self):
loci_string = ''
for i in self.loci:
loci_string += i.name+','
return 'Project object with the loci '+loci_string
def __records_list_to_dict__(self):
self._records_dict = SeqIO.to_dict(self.records)
def last_git_log(self):
print self.git_log.split('<<<<')[-1]
def show_commits(self):
print rpgit.gitLog()[0]
###################################
# Project methods for reading data
###################################
def read_embl_genbank(self, input_filenames_list):
"""
Read a file from Genbank of EMBL
>>> input_filenames = ['test-data/test.gb']
>>> locus = Locus('dna', 'CDS', 'coi', ['cox1','COX1','coi','COI','CoI'])
>>> pj = Project([locus], git=False)
>>> print(len(pj.records))
0
>>> pj.read_embl_genbank(input_filenames)
>>> print(len(pj.records))
89
"""
if __builtin__.git:
import rpgit
else:
warnings.warn('Version control off')
generators = []
for input_filename in input_filenames_list:
if __builtin__.git:
import rpgit
out, err = rpgit.gitAdd(input_filename)
undate_git_log(self, out, err)
generators.append(parse_input(input_filename, 'gb'))
for generator in generators:
for record in generator:
dwindled_record = dwindle_record(record, self.loci)
if len(record.features) > 1:
self.records.append(dwindled_record)
elif len(record.features) == 1 and not record.features[0].type == 'source':
self.records.append(dwindled_record)
if __builtin__.git:
import rpgit
comment = "%i genbank/embl data file(s) from %s" % (len(input_filenames_list), time.asctime())
for filename in input_filenames_list:
out, err = rpgit.gitAdd(filename)
undate_git_log(self, out, err)
cwd = os.getcwd()
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(self, out, err)
out, err = rpgit.gitCommit(comment)
undate_git_log(self, out, err)
def read_denovo(self, input_filenames, char_type, format = 'fasta'):
"""
Include records from a fasta file. Fasta file records will be given record ids
of the form 'denovo1'. The record.id and record.description will be placed in a
source feature under the 'original_id' and 'original_desc' qualifiers. Denovo sequences
require the use of the add_feature_to_record() method in order to be included in the
anaysis.
>>> input_filenames = ['test-data/test.gb']
>>> locus = Locus('dna', 'CDS', 'coi', ['cox1','COX1','coi','COI','CoI'])
>>> pj = Project([locus], git=False)
>>> print(len(pj.records))
0
>>> pj.read_embl_genbank(input_filenames)
>>> print(len(pj.records))
89
>>> input_filenames = ['test-data/test.fasta']
>>> pj.read_denovo(input_filenames, 'dna')
1
>>> print(len(pj.records))
90
# Since the denovo sequence has no feature it is not included
>>> pj.extract_by_locus()
>>> print(len(pj.records_by_locus['coi']))
89
# Making a feature for the denovo record.
>>> pj.add_feature_to_record('denovo0', 'CDS', qualifiers={'gene': 'coi'})
'denovo0_f0'
>>> pj.extract_by_locus()
>>> print(len(pj.records_by_locus['coi']))
90
"""
if __builtin__.git:
import rpgit
else:
warnings.warn('Version control off')
count = 0
        # start the counter where it stopped the last time we read denovo things
for record in self.records:
if 'denovo' in record.id:
serial = int(record.id[6:])
if serial >= count:
count = serial+1
for input_filename in input_filenames:
if __builtin__.git:
import rpgit
rpgit.gitAdd(input_filename)
denovo = SeqIO.parse(input_filename, format)
for record in denovo:
source = SeqFeature(FeatureLocation(0, len(record.seq)), type='source', strand=1)
source.qualifiers['original_id'] = [record.id]
source.qualifiers['original_desc'] = [(' ').join(record.description.split()[1:])]
record.id = 'denovo'+str(count)
record.name = record.id
source.qualifiers['feature_id'] = [record.id+'_source']
record.features = [source]
if '-' in str(record.seq):
record.seq = Seq(str(record.seq).replace('-',''))
warnings.warn("Reseting gaps in records from %s"%input_filename)
if char_type == 'prot':
record.seq.alphabet = IUPAC.protein
#feature = SeqFeature(FeatureLocation(0, len(record.seq)), type='Protein', strand=1)
#feature.qualifiers['translation'] = [str(record.seq)]
#feature.qualifiers['protonly']=['true']
elif char_type == 'dna':
record.seq.alphabet = IUPAC.ambiguous_dna
count += 1
self.records.append(record)
if __builtin__.git:
import rpgit
comment = "%i denovo data file(s) from %s" % (len(input_filenames), time.asctime())
for filename in input_filenames:
out, err = rpgit.gitAdd(filename)
undate_git_log(self, out, err)
cwd = os.getcwd()
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(self, out, err)
out, err = rpgit.gitCommit(comment)
undate_git_log(self, out, err)
return count
def read_alignment(self, filename, char_type, feature_type, locus_name, format="fasta", aln_method_name = "ReadDirectly", exclude=[]):
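        """
        Reads an existing alignment file, stores the alignment under the token
        '<locus_name>@<aln_method_name>' and adds the ungapped sequences to
        self.records as new 'denovo' records of the given locus.
        """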
if __builtin__.git:
import rpgit
else:
warnings.warn('Version control off')
if not any([locus.name == locus_name for locus in self.loci]):
raise RuntimeError("Locus %s does not exist"%locus_name)
elif not [locus for locus in self.loci if locus.name == locus_name][0].char_type == char_type:
raise RuntimeError("%s is not a %s locus"%(locus_name, char_type))
elif not [locus for locus in self.loci if locus.name == locus_name][0].feature_type == feature_type:
raise RuntimeError("The feature_type %s is not %s"%(locus_name, feature_type))
count = 0
        # start the counter where it stopped the last time we read denovo things
for record in self.records:
if 'denovo' in record.id:
serial = int(record.id[6:])
if serial >= count:
count = serial+1
# Read the alignment:
raw_aln_input = list(AlignIO.read(filename, format))
# make records
records = []
aln_records = []
for record in raw_aln_input:
total_seq_len = len(record.seq)
if (str(record.seq).count('-') + str(record.seq).count('.') +
str(record.seq).count('?') + str(record.seq).count('N') +
str(record.seq).count('n') + str(record.seq).count('X') +
str(record.seq).count('x')) == total_seq_len:
                print 'dropping seq %s in locus %s: missing data'%(record.id, locus_name)
elif not record.id in exclude:
# remove gaps
new_record = SeqRecord(seq=Seq(str(record.seq).replace('-','').replace('.','')))
aln_record = SeqRecord(seq=Seq(str(record.seq).replace('.','-')))
#set alphabet
if char_type == 'prot':
new_record.seq.alphabet = IUPAC.protein
aln_record.seq.alphabet = IUPAC.protein
elif char_type == 'dna':
new_record.seq.alphabet = IUPAC.ambiguous_dna
aln_record.seq.alphabet = IUPAC.ambiguous_dna
# set denovo record id
new_record.id = 'denovo%i'%count
new_record.name = new_record.id
# set source and first feature
source = SeqFeature(FeatureLocation(0, len(new_record.seq)), type='source', strand=1)
source.qualifiers['original_id'] = [record.id]
source.qualifiers['original_desc'] = [(' ').join(record.description.split()[1:])]
source.qualifiers['feature_id'] = [new_record.id+'_source']
feature = SeqFeature(FeatureLocation(0, len(new_record.seq)), type=feature_type, strand=1)
feature.qualifiers['feature_id'] = [new_record.id+'_f0']
feature.qualifiers['gene'] = [locus_name]
feature_seq = feature.extract(new_record.seq)
degen = len(feature_seq)
if char_type == 'dna':
for i in ['A','T','G','C','U','a','t','g','c','u']:
degen -= feature_seq.count(i)
feature.qualifiers['GC_content'] = [str(GC(feature_seq))]
feature.qualifiers['nuc_degen_prop'] = [str(float(degen)/len(feature_seq))]
warnings.warn("To get translations, add a feature manually")
elif char_type == 'prot':
degen = 0
for i in ['B', 'X', 'Z', 'b', 'x', 'z']:
degen += feature_seq.count(i)
feature.qualifiers['prot_degen_prop'] = [str(float(degen)/len(feature_seq))]
new_record.features = [source, feature]
aln_record.id = 'denovo%i_f0'%count
count += 1
records.append(new_record)
aln_records.append(aln_record)
token = "%s@%s"%(locus_name, aln_method_name)
if token in self.alignments.keys():
warnings.warn("Replacing alignment %s"%token)
# need to add the denovo id's inside the alignment
self.alignments[token] = MultipleSeqAlignment(aln_records)
self.records += records
self.extract_by_locus()
if __builtin__.git:
import rpgit
comment = "Alignment file %s" % (time.asctime())
out, err = rpgit.gitAdd(filename)
undate_git_log(self, out, err)
cwd = os.getcwd()
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(self, out, err)
out, err = rpgit.gitCommit(comment)
undate_git_log(self, out, err)
def add_feature_to_record(self, record_id, feature_type, location='full', qualifiers={}):
"""
# Making a dummy locus
>>> coi = Locus('dna','CDS','coi', ['cox1','COX1','coi','COI','CoI'])
# Making a dummy Project
>>> pj = Project([coi], git=False)
# making a dummy record
>>> s = 'atgc'*1000
>>> source = SeqFeature()
>>> source.location = FeatureLocation(0,3999)
>>> source.type = 'source'
>>> record = SeqRecord(seq=Seq(s, IUPAC.ambiguous_dna), id='1', description='spam')
>>> record.features.append(source)
>>> pj.records = [record]
>>> print(len(pj.records[0].features))
1
# adding a feature to a record in the pj
>>> import warnings
>>> warnings.simplefilter("ignore")
>>> pj.add_feature_to_record('1', 'CDS', qualifiers={'gene': 'madeuplocus'})
'1_f0'
>>> print(len(pj.records[0].features))
2
"""
for record in self.records:
if record.id == record_id:
#determine new feature id
feature_id = None
serials = []
if len(record.features) > 0:
for feature in record.features:
if 'feature_id' in feature.qualifiers.keys():
if '_f' in feature.qualifiers['feature_id'][0]:
f = feature.qualifiers['feature_id'][0]
serials.append(int(f.split('_')[1][1:]))
serials.sort(reverse = True)
if len(serials) > 0:
feature_id = record.id + '_f' + str(serials[0]+1)
else:
feature_id = record.id + '_f0'
feature = None
if location == 'full':
feature = SeqFeature(FeatureLocation(0, len(record.seq)),
type=feature_type,
strand=1)
elif isinstance(location, list):
for i in location:
if not isinstance(i, list):
raise RuntimeError('\'location\' takes either \'full\' or a list of lists')
if len(location) == 1:
feature = SeqFeature(FeatureLocation(int(location[0][0])-1,int(location[0][1])),
type=feature_type, strand=int(location[0][2]))
elif len(location) > 1:
list_of_locations = []
for i in location:
start = int(i[0]-1)
end = int(i[1])
strand = int(i[2])
list_of_locations.append(FeatureLocation(start,end,strand=strand))
feature = SeqFeature(CompoundLocation(list_of_locations),type=feature_type)
feature.qualifiers['feature_id'] = [feature_id]
if len(qualifiers.keys()) > 0:
for key in qualifiers.keys():
feature.qualifiers[key] = [qualifiers[key]]
if (('codon_start' in qualifiers.keys()) and
('transl_table' in qualifiers.keys())):
cds = feature.extract(record.seq)
if str(qualifiers['codon_start']) == '2':
cds = cds[1:]
elif str(qualifiers['codon_start']) == '3':
cds = cds[2:]
translation = cds.translate(table=int(qualifiers['transl_table']))
if len(translation)*3 < float(0.9)*len(cds):
raise RuntimeWarning('The translation of feature '+feature_id+' uses less than 90%'+
' of the coding sequence')
feature.qualifiers['translation'] = [str(translation)]
feature_seq = feature.extract(record.seq)
degen = len(feature_seq)
for i in ['A','T','G','C','U','a','t','g','c','u']:
degen -= feature_seq.count(i)
feature.qualifiers['GC_content'] = [str(GC(feature_seq))]
feature.qualifiers['nuc_degen_prop'] = [str(float(degen)/len(feature_seq))]
if 'translation' in feature.qualifiers.keys():
transl = feature.qualifiers['translation'][0]
degen = 0
for i in ['B', 'X', 'Z', 'b', 'x', 'z']:
degen += transl.count(i)
feature.qualifiers['prot_degen_prop'] = [str(float(degen)/len(transl))]
record.features.append(feature)
return feature_id
##############################################
# Project methods for managing concatenations
##############################################
def add_concatenation(self, concatenation_object):
"""
add a Concatenation object to the Project
# making dummy loci
>>> coi = Locus('dna','CDS','coi',['COX1','cox1'])
>>> ssu = Locus('dna','rRNA','18S',['18S','SSU'])
>>> lsu = Locus('dna','rRNA','28S',['28S','LSU'])
>>> loci = [coi,ssu,lsu]
# making dummy Concatenation
>>> combined = Concatenation(name='combined',
... loci=loci, otu_meta='OTU_dict',
... otu_must_have_all_of=['coi'],
... otu_must_have_one_of =[['18S','28S']],
... define_trimmed_alns=["MafftLinsi@Gappyout"])
>>> print(str(combined))
Concatenation named combined, with loci coi,18S,28S,
of which coi must exist for all species
and at least one of each group of [ 18S 28S ] is represented.
Alignments with the following names: MafftLinsi@Gappyout are prefered
# making a dummy Project
>>> pj = Project(loci, git=False)
# Including the Concatenation in the Project
>>> pj.add_concatenation(combined)
>>> print(len(pj.concatenations))
1
"""
if isinstance(concatenation_object, Concatenation):
# correct characters offending raxml, back up original values
meta = concatenation_object.otu_meta
self.copy_paste_within_feature(meta, "original_%s"%meta)
offensive = ['\t','\r','\n', "'", '"', ',', ' ',
';', ':', ']','[','(',')','/']
for r in self.records:
for f in r.features:
if meta in f.qualifiers.keys():
for o in offensive:
if o in f.qualifiers[meta][0]:
print (("found raxml offensive char %s in OTU %s. Replacing with '_ro_'."+
"Backing up original in the qualifier original_%s.")%(o, f.qualifiers[meta][0], meta))
f.qualifiers[meta][0] = f.qualifiers[meta][0].replace(o,'_ro_')
seen = []
for s in self.concatenations:
seen.append(s.name)
if concatenation_object.name in seen:
                raise NameError('Concatenation ' + concatenation_object.name +
                                ' appears more than once in self.concatenations')
else:
self.concatenations.append(concatenation_object)
else:
raise TypeError("Expecting Concatenation object")
def make_concatenation_alignments(self):
"""
        Concatenates the trimmed alignments based on each of the Concatenation objects and adds them
        to the pj.trimmed_alignments dictionary. While a trimmed alignment of an individual locus has a key
        following the pattern "locus_name@alignment_method_name@trimming_method_name", the key of a concatenated
        trimmed alignment is the Concatenation object's name attribute.
"""
self.__records_list_to_dict__()
for s in self.concatenations:
# get a non-redundant list of OTUs stored in 'meta', such as voucher specimen
meta = s.otu_meta
OTU_list = []
for record in self.records:
for feature in record.features:
if not feature.type == 'source':
qualifiers_dictionary = __get_qualifiers_dictionary__(self,
feature.qualifiers['feature_id'])
if (meta in qualifiers_dictionary.keys() and
not qualifiers_dictionary[meta] in OTU_list):
OTU_list.append(qualifiers_dictionary[meta])
            included_individuals = {} #included_individuals[otu][locus]=feature_id
#Get the correct trimmed alignment tokens
keys_of_trimmed_alignments_to_use_in_concat = []
for locus in s.loci:
trimmed_aln = None
all_locus_trimmed_alns_in_pj = []
for key in self.trimmed_alignments.keys():
if locus.name == key.split('@')[0]:
all_locus_trimmed_alns_in_pj.append(key)
if len(all_locus_trimmed_alns_in_pj) == 1:
trimmed_aln = all_locus_trimmed_alns_in_pj[0]
elif len(all_locus_trimmed_alns_in_pj) == 0:
                    raise RuntimeError('Locus '+locus.name+' has no trimmed alignments')
else:
s.define_trimmed_alns.sort(key = lambda i: i.count('@'), reverse=True)
for definition in s.define_trimmed_alns:
if definition.count('@') == 2 and locus.name == definition.split('@')[0]:
trimmed_aln = definition
elif definition.count('@') == 1 and any([definition in i for i in all_locus_trimmed_alns_in_pj]):
trimmed_aln = locus.name+'@'+definition
else:
raise RuntimeError("Could not determine which alignment/trimming alternative to use for locus '"+
locus.name+"' out of "+str(all_locus_trimmed_alns_in_pj))
if trimmed_aln:
keys_of_trimmed_alignments_to_use_in_concat.append(trimmed_aln)
else:
                    raise RuntimeError('Could not find trimmed aln for locus '+locus.name+' given the rules '+str(s.define_trimmed_alns))
#print "%i individuals will be included in the concatenations %s"%(len(included_individuals.keys()), s.name)
#if len(included_individuals.keys()) < 4:
# raise RuntimeError("Concatenation %s has less than 4 OTUs and cannot be analyzed"%s.name)
for otu in OTU_list:
otu_features = {}
use = True
# Check first rule
for locus in s.otu_must_have_all_of:
token = [t for t in keys_of_trimmed_alignments_to_use_in_concat if "%s@"%locus in t][0]
feature_ids = [r.id for r in self.trimmed_alignments[token]]
feature_found = False
count = 0
for feature_id in feature_ids:
qualifiers = __get_qualifiers_dictionary__(self, feature_id)
if meta in qualifiers.keys() and otu == qualifiers[meta]:
count += 1
feature_found = True
otu_features[locus] = feature_id
if count > 1:
raise RuntimeError("%s is not unique in %s"%(otu, locus))
if not feature_found:
use = False
# Check second rule
if use:
for group in s.otu_must_have_one_of:
if isinstance(group,str):
raise IOError('The keyword \'otu_must_have_one_of\' has to be a list of lists')
feature_found = False
for locus in group:
token = [t for t in keys_of_trimmed_alignments_to_use_in_concat if "%s@"%locus in t][0]
feature_ids = [r.id for r in self.trimmed_alignments[token]]
count = 0
for feature_id in feature_ids:
qualifiers = __get_qualifiers_dictionary__(self, feature_id)
if meta in qualifiers.keys() and otu == qualifiers[meta]:
count += 1
feature_found = True
otu_features[locus] = feature_id
if count > 1:
raise RuntimeError("%s is not unique in %s"%(otu, locus))
if not feature_found:
use = False
if use:
included_individuals[otu] = otu_features
# printing a table of the alignment
included_indivduals_table = ''
loci_names = [l.name for l in s.loci]
line = 'OTU'.ljust(30,' ')
for name in loci_names:
line += name.ljust(20,' ')
included_indivduals_table += line+'\n'
for otu in included_individuals.keys():
line = otu.ljust(30,' ')
for locus_name in loci_names:
if locus_name in included_individuals[otu].keys():
line += included_individuals[otu][locus_name].ljust(15,' ')
else:
line += ''.ljust(15,' ')
included_indivduals_table += line+'\n'
print "Concatenation %s will have the following data"%s.name
print included_indivduals_table
# remove partitions with less then 4 sequences
for name in loci_names:
if len([otu for otu in included_individuals.keys() if name in included_individuals[otu].keys()]) < 4:
print (("Locus %s has less then 4 sequences in concatenation %s and where excluded "+
"from the concatenation")%(name,s.name))
for key in keys_of_trimmed_alignments_to_use_in_concat:
if name in key:
keys_of_trimmed_alignments_to_use_in_concat.remove(key)
# build alignment
# concat_records = []
alignment = []
for individual in included_individuals.keys():
sequence = ''
for key in keys_of_trimmed_alignments_to_use_in_concat:
locus_name = key.split('@')[0]
length = len(self.trimmed_alignments[key][0])
s.used_trimmed_alns[key] = length
if locus_name in included_individuals[individual].keys():
for record in self.trimmed_alignments[key]:
if record.id == included_individuals[individual][locus_name]:
sequence += str(record.seq)
else:
sequence += '?'*length
concat_sequence = SeqRecord(seq = Seq(sequence), id = individual, description = '')
alignment.append(concat_sequence)
self.trimmed_alignments[s.name] = MultipleSeqAlignment(alignment)
s.feature_id_dict = included_individuals
self._records_dict = {}
###################################################
# Project methods for modifying feature qualifiers
###################################################
def write(self, filename, format = 'genbank'):
"""
        Write the records in the project in any Biopython format, or as csv, nexml or phyloxml.
        The csv table has a line for each feature (i.e. multiple lines for records
        with multiple non-source features). Each line includes the record's annotations,
        the source feature qualifiers and the qualifiers of the feature itself (i.e., in a
        record with several features, the record annotations and the source feature qualifiers
        will be repeated in several lines, once for each non-source feature in the record).
        The csv file is primarily useful for reviewing and editing feature qualifiers.
        The nexml format only includes the trees from the pj.trees dictionary, but all
        the metadata is included as leaf metadata, including the sequences and the
        trimmed and aligned sequences for each leaf.
        """
if format == 'nexml':
self.write_nexml(filename)
if format == 'phyloxml':
self.write_phyloxml(filename)
elif format == 'genbank' or format == 'embl':
SeqIO.write(self.records, filename, format)
elif format == 'csv':
# get titles for source and othe features
source_qualifiers = []
feature_qualifiers = []
for record in self.records:
for feature in record.features:
for key in feature.qualifiers.keys():
if feature.type == 'source' and not key in source_qualifiers:
source_qualifiers.append(key)
elif not feature.type == 'source' and not key in feature_qualifiers:
feature_qualifiers.append(key)
with open(filename, 'wb') as csvfile:
linewriter = csv.writer(csvfile, delimiter='\t',
quotechar='"',
quoting=csv.QUOTE_MINIMAL)
linewriter.writerow(['record_id','seq']+['source:_'+q for q in source_qualifiers]+['taxonomy']+feature_qualifiers)
for record in self.records:
seq = ''
if len(record.seq) <= 10:
seq = str(record.seq)[0:11]
else:
seq = str(record.seq)[0:6] + '...' + str(record.seq)[-5:]
line_start = [record.id, seq]
source = None
for feature in record.features:
if feature.type == 'source':
source = feature
if not source == None:
for qual in source_qualifiers:
if qual in source.qualifiers.keys():
line_start.append(type_to_single_line_str(source.qualifiers[qual]))
else:
line_start.append('null')
elif source == None:
for qual in source_qualifiers:
line_start.append('null')
if 'taxonomy' in record.annotations.keys():
line_start.append(type_to_single_line_str(record.annotations['taxonomy']))
else:
line_start.append(['null'])
for feature in record.features:
if not feature.type == 'source':
line = list(line_start)
for qual in feature_qualifiers:
if qual in feature.qualifiers.keys() and qual == 'translation':
trans = feature.qualifiers[qual][0]
if len(trans)<=10:
line.append(trans)
else:
line.append(trans[:6] + '...' + trans[-5:])
elif qual in feature.qualifiers.keys():
line.append(type_to_single_line_str(feature.qualifiers[qual]))
else:
line.append('null')
linewriter.writerow(line)
if __builtin__.git:
import rpgit
comment = "Records %s text file from %s" % (format, time.asctime())
out, err = rpgit.gitAdd(filename)
undate_git_log(self, out, err)
cwd = os.getcwd()
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(self, out, err)
out, err = rpgit.gitCommit(comment)
undate_git_log(self, out, err)
def correct_metadata_from_file(self,csv_file):
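        # Illustrative round trip (a sketch): dump the qualifiers to a spreadsheet-friendly
        # table, edit it by hand, and read the corrections back in:
        #
        #     pj.write('records.csv', format='csv')
        #     # ... edit records.csv in a spreadsheet ...
        #     pj.correct_metadata_from_file('records.csv')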
metadata = read_feature_quals_from_tab_csv(csv_file)
new_records = []
for record in self.records:
if record.id in metadata.keys():
#print 'making new record'
new_record = record
record_corrected_metadata = metadata[record.id]
new_record.annotations['taxonomy'] = metadata[record.id]['taxonomy']
for feature in new_record.features:
if feature.type == 'source':
feature.qualifiers = metadata[record.id]['source']
else:
feature_id = feature.qualifiers['feature_id']
translation = None
if 'translation' in feature.qualifiers.keys():
translation = feature.qualifiers['translation']
feature.qualifiers = metadata[record.id]['features'][feature_id[0]]
feature.qualifiers['feature_id'] = feature_id
if translation:
feature.qualifiers['translation'] = translation
new_records.append(new_record)
else:
#print 'using old records'
new_records.append(record)
self.records = new_records
if __builtin__.git:
import rpgit
comment = "Corrected metadata CSV file from %s" % (time.asctime())
out, err = rpgit.gitAdd(csv_file)
undate_git_log(self, out, err)
cwd = os.getcwd()
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(self, out, err)
out, err = rpgit.gitCommit(comment)
undate_git_log(self, out, err)
def if_this_then_that(self, IF_THIS, IN_THIS, THEN_THAT, IN_THAT, mode = 'whole'):
"""
Searches pj.records for features that have the value IF_THIS in the qualifier IN_THIS
and places the value THEN_THAT in the qualifier IN_THAT, which either exists or is new.
        The IF_THIS value can either match the target qualifier value completely (mode = 'whole')
        or just a part of it (mode = 'part').
        The following demonstrates all the feature qualifier editing methods
# Make a dummy pj with a locus and with records
>>> input_filenames = ['test-data/test.gb']
>>> locus = Locus('dna', 'CDS', 'coi', ['cox1','COX1','coi','COI','CoI'])
>>> pj = Project([locus], git=False)
>>> pj.read_embl_genbank(input_filenames)
# copying a source qualifier into the feature qualifiers so that it
        # will be available for editing (source qualifiers are kept immutable)
>>> pj.add_qualifier_from_source('organism')
# populate a new qualifier based on the data in another
        # Here we will take only the genus name from the organism qualifier
# and put it in a new qualifier
# We use mode='part' because our search phrase (the genus name)
# fits only the start of the organism name
>>> tetillid_genera = ['Tetilla', 'Cinachyrella', 'Craniella']
>>> for g in tetillid_genera:
... pj.if_this_then_that(g, 'organism', g, 'genus', mode='part')
# Now we will add a sample id to all the sequences which belong to
# sample TAU_25617
>>> pj.add_qualifier(['JX177913.1_f0',
... 'JX177935.1_f0',
... 'JX177965.1_f0'],
... 'specimen_voucher',
... 'TAU_25617')
        # We are using a copy-paste approach to unite the data from
        # different qualifiers under one qualifier
>>> pj.copy_paste_within_feature('variant', 'strain_or_variant')
>>> pj.copy_paste_within_feature('strain', 'strain_or_variant')
# Now we print the qualifier of a random feature as an example
>>> qual_dict = get_qualifiers_dictionary(pj, 'JX177913.1_f0')
>>> qual_items = qual_dict.items()
>>> qual_items.sort(key = lambda i: i[0])
>>> for key, val in qual_items:
... print(key.ljust(40,' ') + type_to_single_line_str(val)[:5]+'...')
GC_content 37.28...
annotation_accessions JX177...
annotation_data_file_division INV...
annotation_date 05-SE...
annotation_gi 39293...
annotation_keywords ...
annotation_organism Cinac...
annotation_references locat...
annotation_sequence_version 1...
annotation_source mitoc...
annotation_taxonomy Eukar...
codon_start 2...
db_xref GI:39...
feature_id JX177...
gene cox1...
genus Cinac...
nuc_degen_prop 0.0...
organism Cinac...
product cytoc...
prot_degen_prop 0.0...
protein_id AFM91...
record_id JX177...
source_country Panam...
source_db_xref taxon...
source_feature_id JX177...
source_identified_by Ilan,...
source_mol_type genom...
source_note autho...
source_organelle mitoc...
source_organism Cinac...
source_specimen_voucher DH_S2...
specimen_voucher TAU_2...
transl_table 4...
translation MIGSG...
        # Note that GC content and the proportion of degenerate positions
# have been automatically included. They will be plotted in the report
"""
for record in self.records:
for feature in record.features:
if not feature.type == 'source':
if IN_THIS in feature.qualifiers.keys():
if not type(feature.qualifiers[IN_THIS]) is list:
feature.qualifiers[IN_THIS] = [feature.qualifiers[IN_THIS]]
for i in feature.qualifiers[IN_THIS]:
if mode == 'whole':
if i == IF_THIS:
feature.qualifiers[IN_THAT] = [THEN_THAT]
elif mode == 'part':
if IF_THIS in i:
feature.qualifiers[IN_THAT] = [THEN_THAT]
def add_qualifier(self, feature_ids, name, value):
if not type(value) is list:
value = [value]
for record in self.records:
for feature in record.features:
if feature.qualifiers['feature_id'][0] in feature_ids:
feature.qualifiers[name] = value
def add_qualifier_from_source(self, qualifier):
for record in self.records:
source = None
for feature in record.features:
if feature.type == 'source':
source = feature
value = None
if not source == None:
if qualifier in source.qualifiers.keys():
value = source.qualifiers[qualifier]
if not value == None:
if not type(value) is list:
value = [value]
for feature in record.features:
if not feature.type == 'source':
feature.qualifiers[qualifier] = value
def copy_paste_within_feature(self, from_qualifier, to_qualifier):
for record in self.records:
for feature in record.features:
if not feature.type == 'source':
if from_qualifier in feature.qualifiers.keys():
feature.qualifiers[to_qualifier] = feature.qualifiers[from_qualifier]
def copy_paste_from_features_to_source(self, from_feature_qual, to_source_qual):
for record in self.records:
source = None
values_from_features = []
for feature in record.features:
if not feature.type == 'source':
if from_feature_qual in feature.qualifiers.keys():
if not feature.qualifiers[from_feature_qual] in values_from_features:
values_from_features += feature.qualifiers[from_feature_qual]
else:
source = feature
if source == None:
source = SeqFeature(FeatureLocation(0, len(record.seq)), type='source', strand=1)
                source.qualifiers['feature_id'] = [record.id + '_source']
record.features = [source] + record.features
if not to_source_qual in source.qualifiers.keys():
source.qualifiers[to_source_qual] = values_from_features
def species_vs_loci(self, outfile_name):
"""
Makes a csv file showing the count of each unique value in the source_organism qualifier
for each locus
"""
species_vs_loci = {}
for record in self.records:
organism = 'undef'
for feature in record.features:
if feature.type == 'source':
if 'organism' in feature.qualifiers.keys():
organism = feature.qualifiers['organism'][0]
if not organism in species_vs_loci.keys():
species_vs_loci[organism] = {}
for feature in record.features:
if not feature.type == 'source':
for locus in self.loci:
if not locus.name in locus.aliases:
locus.aliases.append(locus.name)
if 'gene' in feature.qualifiers.keys():
if feature.qualifiers['gene'][0] in locus.aliases:
if not locus.name in species_vs_loci[organism].keys():
species_vs_loci[organism][locus.name] = 1
else:
species_vs_loci[organism][locus.name] += 1
elif 'product' in feature.qualifiers.keys():
if feature.qualifiers['product'][0] in locus.aliases:
if not locus.name in species_vs_loci[organism].keys():
species_vs_loci[organism][locus.name] = 1
else:
species_vs_loci[organism][locus.name] += 1
with open(outfile_name, 'wb') as csvfile:
linewriter = csv.writer(csvfile, delimiter='\t',
quotechar='|',
quoting=csv.QUOTE_MINIMAL)
loci_names = []
for g in self.loci:
loci_names.append(g.name)
linewriter.writerow(['species']+loci_names)
for organism in sorted(list(species_vs_loci.keys())):
line = [organism]
for name in loci_names:
if name in species_vs_loci[organism].keys():
line.append(str(species_vs_loci[organism][name]))
else:
line.append('0')
linewriter.writerow(line)
######################################
# Project methods to analyze the data
######################################
def extract_by_locus(self):
"""
>>> input_filenames = ['test-data/test.gb']
>>> coi = Locus('dna', 'CDS', 'coi', ['cox1','COX1','coi','COI','CoI'])
>>> lsu = Locus('dna', 'rRNA', '28S', ['28s','28S','LSU rRNA','28S ribosomal RNA','28S large subunit ribosomal RNA'])
>>> pj = Project([coi, lsu], git=False)
>>> pj.read_embl_genbank(input_filenames)
>>> pj.extract_by_locus()
>>> print(len(pj.records_by_locus['coi']))
89
>>> print(len(pj.records_by_locus['28S']))
48
"""
data_by_locus = {}
for locus in self.loci:
if not locus.name in locus.aliases:
locus.aliases.append(locus.name)
locus.aliases.append(locus.name.replace('_',' '))
records = []
for record in self.records:
for feature in record.features:
if ((feature.type == locus.feature_type or
# equate CDS and Protein feature types to allow reading protein sequences from
# a mix of DNA gb files and protein fasta files that were given a Protein feature
# type
(feature.type in ['CDS','Protein'] and locus.feature_type in ['CDS','Protein'])) and
(('gene' in feature.qualifiers.keys() and
feature.qualifiers['gene'][0] in locus.aliases)
or
('product' in feature.qualifiers.keys() and
feature.qualifiers['product'][0] in locus.aliases))
):
if locus.char_type == 'dna':
if not type(record.seq.alphabet) == IUPAC.IUPACProtein:
S = feature.extract(record.seq)
else:
raise RuntimeError('Trying to read DNA from protein only records')
elif locus.char_type == 'prot':
if not type(record.seq.alphabet) == IUPAC.IUPACProtein:
if 'translation' in feature.qualifiers.keys():
S = Seq(feature.qualifiers['translation'][0], IUPAC.protein)
else:
raise RuntimeError('Trying to read protein from DNA records without translation info')
else:
S = feature.extract(record.seq)
feature_record = SeqRecord(seq = S, id = feature.qualifiers['feature_id'][0],
description = '')
records.append(feature_record)
data_by_locus[locus.name] = records
self.records_by_locus = data_by_locus
def exclude(self, start_from_max=True, **kwargs):
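        # Illustrative usage (a sketch; keyword names are locus names, values are record ids
        # without the feature suffix, or 'all' to empty the locus):
        #
        #     pj.exclude(coi=['JX177913.1'])
        #     pj.exclude(**{'28S': 'all'})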
keep_safe = self.records_by_locus
self.extract_by_locus()
locus_names = [i.name for i in self.loci]
for key, value in kwargs.iteritems():
if key in locus_names:
if value == 'all':
self.records_by_locus[key] = []
else:
subset = []
locus_feature_ids = [i.id.rpartition('_')[0] for i in self.records_by_locus[key]]
if not all(i in locus_feature_ids for i in value):
print [i for i in value if not i in locus_feature_ids]
warnings.warn('Not all records to exclude exist in locus. Typos?')
if not start_from_max and not keep_safe == {}:
for record in keep_safe[key]:
if not record.id.rpartition('_')[0] in [i for i in value]:
subset.append(record)
self.records_by_locus[key] = subset
else:
for record in self.records_by_locus[key]:
if not record.id.rpartition('_')[0] in [i for i in value]:
subset.append(record)
self.records_by_locus[key] = subset
else:
warnings.warn('Locus name %s not recognised'%key)
def include(self, start_from_null=True, **kwargs):
keep_safe = self.records_by_locus
self.extract_by_locus()
locus_names = [i.name for i in self.loci]
for key, value in kwargs.iteritems():
if key in locus_names:
if value == 'all':
pass
else:
subset = []
locus_feature_ids = [i.id.rpartition('_')[0] for i in self.records_by_locus[key]]
if not all(i in locus_feature_ids for i in value):
                        print [i for i in value if not i in locus_feature_ids]
warnings.warn('Not all records to include exist in locus. Typos?')
for record in self.records_by_locus[key]:
if record.id.rpartition('_')[0] in [i for i in value]:
subset.append(record)
self.records_by_locus[key] = subset
if not start_from_null and not keep_safe == {}:
self.records_by_locus[key] = subset+keep_safe[key]
else:
warnings.warn('Locus name %s not recognised'%key)
def filter_by_seq_length(self, locus_name, min_length=0, max_length=None):
if self.records_by_locus == {}:
self.extract_by_locus()
subset = [r for r in self.records_by_locus[locus_name] if len(r) >= min_length]
if max_length:
subset = [r for r in subset if len(r) <= max_length]
self.records_by_locus[locus_name] = subset
def filter_by_gc_content(self, locus_name, min_percent_gc=0, max_percent_gc=None):
if self.records_by_locus == {}:
self.extract_by_locus()
subset = [r for r in self.records_by_locus[locus_name] if GC(r.seq) >= min_percent_gc]
if max_percent_gc:
subset = [r for r in subset if GC(r.seq) <= max_percent_gc]
self.records_by_locus[locus_name] = subset
def write_by_locus(self, format = 'fasta'):
"""
        Write the unaligned sequences to files in any Biopython format, one file per locus
"""
if self.records_by_locus == {}:
            self.extract_by_locus()
for key in self.records_by_locus.keys():
SeqIO.write(self.records_by_locus[key], key+'.'+format, format)
def align(self, alignment_methods=[], pal2nal='defaults'):
"""
        Align the records of each locus; configured by the AlnConf objects passed in alignment_methods
"""
if pal2nal == 'defaults':
pal2nal = self.defaults['pal2nal']
seen_loci = []
for method in alignment_methods:
method.timeit.append(time.time())
method.platform = platform_report()
if method.program_name == 'muscle':
method.platform.append('Program and version: '+
os.popen(method.cmd + ' -version').read().split(' by')[0])
method.platform.append('Program reference:\nEdgar 2004: MUSCLE: multiple sequence '+
'alignment with high accuracy and high throughput. '+
'Nucleic Acids Research 32(5):1792-1797')
elif method.program_name == 'mafft':
p = sub.Popen(method.cmd+" --version", shell=True, stderr=sub.PIPE, stdout=sub.PIPE)
method.platform.append('Program and version: '+
p.communicate()[1].splitlines()[3].strip().split(' (')[0])
method.platform.append('Program reference:Katoh, Standley 2013 (Molecular Biology and Evolution 30:772-780) '+
'MAFFT multiple sequence alignment software version 7: improvements in performance and usability.')
# PROGRAM PLUG
# NOTE: THIS ADDS THE PROGRAM AND REFERENCE INFO TO THE CONF OBJECT
#
# elif method.program_name == 'the name of the program':
# p = sub.Popen(method.cmd+" command that writes version", shell=True, stderr=sub.PIPE, stdout=sub.PIPE)
# method.platform.append('Program and version: '+
# # 1 for stderr, 0 for stdout
# p.communicate()[1].splitlines()[# get the line and split])
# method.platform.append('Program reference: write the reference here')
if method.CDSAlign:
p = sub.Popen('pal2nal.pl', shell=True, stderr=sub.PIPE, stdout=sub.PIPE)
method.platform[-2] += '\nPal2Nal '+ p.communicate()[1].splitlines()[1].split('(')[1].split(')')[0]
method.platform[-1] += ['\nMikita Suyama, David Torrents, and Peer Bork (2006) PAL2NAL: robust '+
'conversion of protein sequence alignments into the corresponding codon '+
'alignments.Nucleic Acids Res. 34, W609-W612.'][0]
for locus in method.loci:
if locus.name in seen_loci:
# Uncomment this to restrict each locus to one Conf object
#raise RuntimeError('locus '+locus.name+' is in more than one AlnConf objects')
pass
else:
seen_loci.append(locus.name)
# This will put a fasta alignment in stdout
stdout, stderr = method.command_lines[locus.name]()
# This will make a MultipleSeqAlignment from the fasta alignment
align = AlignIO.read(StringIO(stdout), "fasta", alphabet=IUPAC.protein)
# This will run pal2nal
if method.CDSAlign and locus.feature_type == 'CDS' and locus.char_type == 'dna':
                        # These will check that the protein alignment has the same OTUs as the
                        # CDS sequences and that their lengths match
for seq in align:
found = 0
for s in method.CDS_in_frame[locus.name]:
if s.id == seq.id:
found = 1
if found == 0:
raise RuntimeError(seq.id + ' is not in the CDS sequences')
for s in method.CDS_in_frame[locus.name]:
found = 0
for seq in align:
if s.id == seq.id:
found = 1
if found == 0:
raise RuntimeError(seq.id + ' is not in the protein sequences')
for seq in method.CDS_in_frame[locus.name]:
for prot in align:
if prot.id == seq.id:
i = 0
for p in str(prot.seq):
if not p == '-':
i += 1
if not i*3 == len(seq.seq):
raise RuntimeError('nuc and prot seqs have unmatched lengths for '+seq.id)
# This will write an input protein alignment file for pal2nal
aln_filename = method.id+'_'+locus.name+'.aln'
AlignIO.write(align, aln_filename, 'fasta')
# This will run pal2nal
cds_filename = method.id+'_CDS_in_frame_'+locus.name+'.fasta'
stdout = os.popen(pal2nal+' '+aln_filename+' '+cds_filename + ' -nostderr -codontable %i'%method.codontable).read()
# This will make a MultipleSeqAlignment out of the pal2nal output
align = AlignIO.read(StringIO(stdout), "clustal", alphabet=IUPAC.ambiguous_dna)
# Maybe this will replace pal2nal
#from Bio import CodonAlign
#codon_aln = CodonAlign.build(align, method.CDS_in_frame[locus.name])
#align = codon_aln
# This will list all the temp files written during the analysis
method_files = glob.glob(method.id+'_*')
# This will get summary statistics of the alignment,
# will remove alignments with less than four unique sequences
# and will remove undetermined sequences.
[summary_lines, num_lines, num_undeter, num_collapsed_aln_seqs] = aln_summary(align)
summary = 'Alignment name: '+locus.name+'@'+method.method_name+'\n'
for line in summary_lines:
summary += line+'\n'
if num_lines < 4:
line = 'Alignment %s has less than 4 sequences and will be dropped'%(locus.name+'@'+method.method_name)
print line
summary += line+'\n'
elif num_undeter[0] > 0:
line = 'Alignment %s has undetermined sequences and will be dropped'%(locus.name+'@'+method.method_name)
print line
summary += line+'\n'
elif num_collapsed_aln_seqs < 4:
line = 'Alignment %s has less than 4 unique sequences and will be dropped'%(locus.name+'@'+method.method_name)
print line
summary += line+'\n'
else:
                        # This will place the MultipleSeqAlignment in the Project
self.alignments[locus.name+'@'+method.method_name] = align
self.aln_summaries.append(summary)
# This will measure the running time
method.timeit.append(time.time())
method.timeit.append(method.timeit[2]-method.timeit[1])
# This will delete the temp files
for f in method_files:
os.remove(f)
self.used_methods[method.method_name] = method
if self.pickle_name:
pickle_pj(self, self.pickle_name, track=False)
if __builtin__.git and self.pickle_name:
comment = ''
for method in alignment_methods:
comment += '%s\n'%(str(method))
import rpgit
out, err = rpgit.gitAdd(self.pickle_name)
undate_git_log(self, out, err)
cwd = os.getcwd()
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(self, out, err)
out, err = rpgit.gitCommit(comment)
undate_git_log(self, out, err)
def write_alns(self, id=['feature_id'], format = 'fasta'):
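        # Illustrative usage (a sketch): writes one file per alignment, named '<key>_aln.<format>',
        # labelling the sequences with the requested qualifiers:
        #
        #     filenames = pj.write_alns(id=['source_organism', 'record_id'])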
filenames = []
if len(self.alignments.keys()) == 0:
raise IOError('Align the records first')
else:
for key in self.alignments:
aln = self.alignments[key]
records = []
for s in aln:
qualifiers = get_qualifiers_dictionary(self, s.id)
new_id = ""
for i in id:
if i in qualifiers.keys():
new_id += qualifiers[i]+'_'
if new_id == "":
new_id = s.id
else:
new_id = new_id[:-1]
records.append(SeqRecord(seq=s.seq, id=new_id))
SeqIO.write(records, key+'_aln.'+format, format)
filenames.append(key+'_aln.'+format)
return filenames
def write_trimmed_alns(self, id=['feature_id'], format = 'fasta'):
filenames = []
if len(self.trimmed_alignments.keys()) == 0:
raise IOError('Align and trimmed the records first')
else:
for key in self.trimmed_alignments.keys():
aln = self.trimmed_alignments[key]
records = []
for s in aln:
qualifiers = get_qualifiers_dictionary(self, s.id)
new_id = ""
for i in id:
if i in qualifiers.keys():
new_id += qualifiers[i]+'_'
if new_id == "":
new_id = s.id
else:
new_id = new_id[:-1]
records.append(SeqRecord(seq=s.seq, id=new_id))
SeqIO.write(records, key+'_trimmed_aln.'+format, format)
filenames.append(key+'_trimmed_aln.'+format)
return filenames
def show_aln(self, token, id=['feature_id']):
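        # Illustrative usage (a sketch; the token is a key from pj.alignments or
        # pj.trimmed_alignments): renders the alignment as colour-coded HTML in a browser tab:
        #
        #     pj.show_aln('coi@MafftLinsi', id=['source_organism'])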
aln_obj=None
if token in self.alignments.keys():
aln_obj = self.alignments[token]
elif token in self.trimmed_alignments.keys():
aln_obj = self.trimmed_alignments[token]
locus_name = token.split('@')[0]
char_type = [locus.char_type for locus in self.loci if locus.name == locus_name][0]
records = []
for s in aln_obj:
qualifiers = get_qualifiers_dictionary(self, s.id)
new_id = ""
for i in id:
if i in qualifiers.keys():
new_id += qualifiers[i]+'_'
if new_id == "":
new_id = s.id
else:
new_id = new_id[:-1]
records.append(SeqRecord(seq=s.seq, id=new_id))
title_length = max([len(r.id) for r in records])+2
# colors
dna_colors = {'a':'green',
'A':'green',
'T':'red',
't':'red',
'U':'red',
'u':'red',
'g':'gray',
'G':'gray',
'c':'blue',
'C':'blue'
}
protein_colors = {'R':'blueviolet',
'r':'blueviolet',
'K':'cornflowerblue',
'k':'cornflowerblue',
'E':'red',
'e':'red',
'D':'crimson',
'd':'crimson',
'I':'gold',
'i':'gold',
'L':'yellow',
'l':'yellow',
'V':'moccasin',
'v':'moccasin',
'A':'lemonchiffon',
'a':'lemonchiffon',
'C':'palegreen',
'c':'palegreen',
'H':'paleturquoise',
'h':'paleturquoise',
'M':'hotpink',
'm':'hotpink',
'N':'pink',
'n':'pink',
'Q':'yellow',
'q':'yellow',
'F':'darkseagreen',
'f':'darkseagreen',
'Y':'darkcyan',
'y':'darkcyan',
'W':'steelblue',
'w':'steelblue',
'S':'thistle',
's':'thistle',
'T':'lavender',
't':'lavender',
'G':'darkgray',
'g':'darkgray',
'P':'gainsboro',
'p':'gainsboro',
}
colors = None
if char_type == 'dna':
colors = dna_colors
elif char_type == 'prot':
colors = protein_colors
linelength = (len(records[0].seq)+len(records[0].id)+3)*10
html_string = '<html><head></head>\n'
html_string += '<body><pre><font face="Courier New">\n'
for r in records:
html_string += r.id.ljust(title_length, '.')
for p in str(r.seq):
c = 'white'
if p in colors.keys():
c = colors[p]
html_string += '<font style="BACKGROUND-COLOR: %s">%s</font>'%(c, p)
html_string += "<br>"
html_string += '</font></pre></body></html>'
import webbrowser
path = os.path.abspath("%s.html"%token)
url = 'file://' + path
with open(path, 'w') as f:
f.write(html_string)
webbrowser.open_new_tab(url)
def tree(self, raxml_methods, bpcomp='default', bpcomp_burnin=0.2, bpcomp_step=10, bpcomp_cutoff=0.01):
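        # Illustrative usage (a sketch; `raxml_conf` stands for a RaxmlConf object configured
        # elsewhere in this module):
        #
        #     pj.tree([raxml_conf])
        #     print(pj.trees.keys())   # keys look like 'locus@aln_method@trim_method@tree_method'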
if bpcomp == 'default':
bpcomp = self.defaults['bpcomp']
for raxml_method in raxml_methods:
raxml_method.timeit.append(time.time())
raxml_method.platform = platform_report()
if isinstance(raxml_method, RaxmlConf):
raxml_method.platform.append('Program and version: '+ raxml_method.cmd +
os.popen(raxml_method.cmd + ' -version').readlines()[2].split('This is ')[1].split(' released')[0])
raxml_method.platform.append('Program reference: '+
'A. Stamatakis: RAxML Version 8: A tool for Phylogenetic Analysis '+
'and Post-Analysis of Large Phylogenies. In Bioinformatics, 2014.')
elif isinstance(raxml_method, PbConf):
p = sub.Popen(raxml_method.cmd+" -v", shell=True, stderr=sub.PIPE, stdout=sub.PIPE)
raxml_method.platform.append('Program and version: '+p.communicate()[1].splitlines()[-1])
raxml_method.platform.append('Program reference: '
'N. Lartillot, T. Lepage and S. Blanquart, 2009: PhyloBayes 3: '+
'a Bayesian software package for phylogenetic reconstruction and '+
'molecular dating. Bioinformatics Vol. 25 no. 17.')
# PROGRAM PLUG
# NOTE: THIS METHOD SERVES ALL PHYLO PROGRAMS ALTHOUGH THE ITERATOR IS
# CALLED raxml_method
# THIS GETS THE VERSION AND REFERENCE OF THE PROGRAM
# elif isinstance(raxml_method, Conf object name):
# p = sub.Popen(raxml_method.cmd+" command that writes version", shell=True, stderr=sub.PIPE, stdout=sub.PIPE)
# raxml_method.platform.append('Program and version: '+
# # 1 for stderr, 0 for stdout
# p.communicate()[1].splitlines()[# get the line and split])
# raxml_method.platform.append('Program reference: write the reference here')
for trimmed_alignment in raxml_method.command_lines.keys():
for cline in raxml_method.command_lines[trimmed_alignment]:
if isinstance(raxml_method, RaxmlConf):
stdout, stderr = cline()
elif isinstance(raxml_method, PbConf):
sub.call(cline, shell=True)
# PROGRAM PLUG
# THIS RUNS THE PROGRAM
# elif isinstance(raxml_method, Conf Object Name):
# sub.call(cline, shell=True)
t = None
if isinstance(raxml_method, RaxmlConf):
if raxml_method.preset == 'fa':
t = Tree('RAxML_bipartitions.'+raxml_method.id+'_'+trimmed_alignment+'0')
elif raxml_method.preset == 'fD_fb':
t = Tree('RAxML_bipartitions.'+raxml_method.id+'_'+trimmed_alignment+'1')
elif raxml_method.preset == 'fd_b_fb':
t = Tree('RAxML_bipartitions.'+raxml_method.id+'_'+trimmed_alignment+'2')
elif raxml_method.preset == 'fd_fJ' or raxml_method.preset == 'fF_fJ':
tree_filename = 'RAxML_fastTreeSH_Support.'+raxml_method.id+'_'+trimmed_alignment+'1'
t = Tree(open(tree_filename,'r').read().replace('[','[&&NHX:support='))
elif isinstance(raxml_method, PbConf):
base_name = "%s_%s"%(raxml_method.id, trimmed_alignment)
trees_file = "%s.1.treelist"%base_name
trees_per_chain = len(open(trees_file,'r').readlines())
# find the number of chains
nchains = raxml_method.cline_args['nchain'].split()[0]
chain_names = ''
for i in range(1,int(nchains)+1):
chain_names += "%s.%i "%(base_name, i)
chain_names = chain_names[:-1]
bpcomp_cline = "%s -c %f -x %i %i %s"%(bpcomp,
bpcomp_cutoff,
int(trees_per_chain*bpcomp_burnin),
int(bpcomp_step),
chain_names)
sub.call(bpcomp_cline, shell=True)
t = Tree("bpcomp.con.tre")
for l in t:
l.support = 0
# PROGRAM PLUG
# NOTE: THIS IS SIMPLIFIED. MIGHT WORK WITH SOMETHING LIKE
# FASTTREE. SEE MORE EXAMPLES ABOVE
# THIS SECTION MAKES A Tree OBJECT OUT OF THE OUTPUT FILE
# elif isinstance(raxml_method, Conf object name):
# base_name = "%s_%s"%(raxml_method.id, trimmed_alignment)
# tree_file = "the form of the output file with the %s"%base_name
# t = Tree(tree_file)
# This puts the Conf id in the tree
for n in t.traverse():
n.add_feature('tree_method_id', str(raxml_method.id)+'_'+trimmed_alignment)
t.dist = 0
t.add_feature('tree_method_id', str(raxml_method.id)+'_'+trimmed_alignment)
                # This gets all the metadata from pj.records and puts it
# on the tree leaves
loci_names = [i.name for i in self.loci]
concat_names = [c.name for c in self.concatenations]
if trimmed_alignment.partition('@')[0] in loci_names:
for leaf in t:
records = self.records
feature = ''
feature_source = ''
record = ''
for r in records:
if r.id in leaf.name:
record = r
for f in r.features:
if f.type == 'source':
feature_source = f
elif f.qualifiers['feature_id'][0] == leaf.name:
feature = f
for a in record.annotations.keys():
label = 'annotation_'+a
leaf.add_feature(label, record.annotations[a])
for f_source_qual in feature_source.qualifiers.keys():
label = 'source_'+f_source_qual
leaf.add_feature(label, feature_source.qualifiers[f_source_qual][0])
for f_qual in feature.qualifiers.keys():
leaf.add_feature(f_qual, feature.qualifiers[f_qual][0])
for l in t:
t.add_feature('tree_id', trimmed_alignment+'@'+raxml_method.method_name)
# This puts a Tree object and a string representation of the tree
# in the project
self.trees[trimmed_alignment+'@'+raxml_method.method_name] = [t,t.write(features=[])]
elif trimmed_alignment in concat_names:
# This does the same as above, but instead of dealing with gene trees
# it deals with supermatrix trees
s = filter(lambda i: i.name == trimmed_alignment, self.concatenations)[0]
for leaf in t:
records = self.records
feature = ''
feature_source = ''
record = ''
for r in records:
for feature in r.features:
if not feature.type == 'source':
qual_dict = get_qualifiers_dictionary(self, feature.qualifiers['feature_id'])
if s.otu_meta in qual_dict.keys() and qual_dict[s.otu_meta] == leaf.name:
for key in qual_dict.keys():
leaf.add_feature(key, qual_dict[key])
for l in t:
t.add_feature('tree_id', s.name+'@mixed@mixed@'+raxml_method.method_name)
self.trees[s.name+'@mixed@mixed@'+raxml_method.method_name] = [t,t.write(features=[])]
# This times the analysis
raxml_method.timeit.append(time.time())
raxml_method.timeit.append(raxml_method.timeit[2]-raxml_method.timeit[1])
# This deletes temp files
if not raxml_method.keepfiles:
for file_name in os.listdir(os.curdir):
if raxml_method.id.partition('_')[0] in file_name:
os.remove(file_name)
self.used_methods[raxml_method.method_name] = raxml_method
if self.pickle_name:
pickle_pj(self, self.pickle_name, track=False)
if __builtin__.git and self.pickle_name:
comment = ''
for raxml_method in raxml_methods:
comment += '%s\n'%(str(raxml_method))
import rpgit
out, err = rpgit.gitAdd(self.pickle_name)
undate_git_log(self, out, err)
cwd = os.getcwd()
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(self, out, err)
out, err = rpgit.gitCommit(comment)
undate_git_log(self, out, err)
def clear_tree_annotations(self):
for tree in self.trees.keys():
t = Tree(self.trees[tree][1])
t.dist = 0
self.trees[tree][0] = t
def write_nexml(self, output_name):
D = dendropy.DataSet()
tree_list = []
loci_names = []
for locus in self.loci:
loci_names.append(locus.name)
for tree_name in self.trees.keys():
            # get aligned and trimmed aligned sequences as leaf features
t = self.trees[tree_name][0]
for l in t:
loc_name = tree_name.split('@')[0]
trim_aln_name = tree_name.rpartition('@')[0]
aln_name = None
if loc_name in loci_names:
aln_name = tree_name.rsplit('@',2)[0]
else:
trim_aln_name = trim_aln_name.split('@')[0]
otu_feature = 'feature_id'
if not aln_name: # then it is a concatenation
for c in self.concatenations:
if c.name == loc_name:
otu_feature = c.otu_meta
if aln_name: # Then it is a locus
leaf_feature_value = getattr(l, otu_feature)
alignment = self.alignments[aln_name]
for record in alignment:
if record.id == leaf_feature_value:
l.add_feature('aligned_sequence',str(record.seq))
t_aln = self.trimmed_alignments[trim_aln_name]
leaf_feature_value = getattr(l, otu_feature)
for record in t_aln:
if record.id == leaf_feature_value:
l.add_feature('aligned_trimmed_sequence',str(record.seq))
tree_string = self.trees[tree_name][0].write(features=[])
tree = dendropy.Tree()
tree.read_from_string(tree_string, schema='newick', extract_comment_metadata = True)
tree_list.append(tree)
TL = dendropy.TreeList(tree_list)
D.add_tree_list(TL)
D.write_to_path(
output_name,
'nexml',
suppress_annotations=False,
annotations_as_nhx=False,
exclude_trees=False)
def write_phyloxml(self, filename):
phyloxmls = []
from Bio import Phylo
from StringIO import StringIO
for token in self.trees.keys():
tree = Phylo.read(StringIO(self.trees[token][0].write(format=2)), 'newick')
xmltree = tree.as_phyloxml()
xmltree.name = token
aln= None
trimaln= None
alnlookup = None
trimalnlookup = None
if token.split('@')[0] in [l.name for l in self.loci]:
aln = self.alignments['%s@%s'%(token.split('@')[0], token.split('@')[1])]
alnlookup = dict((rec.id, str(rec.seq)) for rec in aln)
trimaln = self.trimmed_alignments['%s@%s@%s'%(token.split('@')[0],
token.split('@')[1],
token.split('@')[2])]
trimalnlookup = dict((rec.id, str(rec.seq)) for rec in trimaln)
elif token.split('@')[0] in [c.name for c in self.concatenations]:
trimaln = self.trimmed_alignments[token.split('@')[0]]
trimalnlookup = dict((rec.id, str(rec.seq)) for rec in trimaln)
else:
raise RuntimeError('No locus or concatenation for tree %s'%token)
for clade in xmltree.get_terminals():
typ = 'dna'
if (alnlookup and
[l for l in self.loci if l.name == token.split('@')[0]][0].char_type == 'prot'):
typ = 'protein'
key = clade.name
                feature_id = key
if not alnlookup:
feature_id = (self.trees[token][0]&key).feature_id
dbtype = 'NCBI'
if alnlookup and 'denovo' in key:
dbtype= 'denovo'
elif not alnlookup:
dbtype= 'otu'
accession = Phylo.PhyloXML.Accession(key, dbtype)
mol_seq = Phylo.PhyloXML.MolSeq(trimalnlookup[key], is_aligned=True)
sequence = Phylo.PhyloXML.Sequence(type=typ,
accession=accession,
mol_seq=mol_seq,
annotations=[Phylo.PhyloXML.Annotation(evidence='trimmed')])
if alnlookup:
mol_seq = Phylo.PhyloXML.MolSeq(alnlookup[key], is_aligned=True)
sequence = Phylo.PhyloXML.Sequence(type=typ,
accession=accession,
mol_seq=mol_seq)
clade.sequences.append(sequence)
others = [Phylo.PhyloXML.Other(tag='trimmedaln',
attributes={'treename':token},
value=trimaln.format('phylip-relaxed'))]
if aln:
others.append(Phylo.PhyloXML.Other(tag='aln',
attributes={'treename':token},
value=aln.format('phylip-relaxed')))
xmltree.other = others
phyloxmls.append(xmltree)
Phylo.write(phyloxmls, filename, 'phyloxml')
def annotate(self, fig_folder,
root_meta,
root_value,
leaf_labels_txt_meta,
leaf_node_color_meta=None,
leaf_label_colors=None,
ftype='Verdana',
fsize=10,
node_bg_meta=None,
node_bg_color=None,
node_support_dict=None,
support_bullet_size=5,
heat_map_meta = None, #list
heat_map_colour_scheme=2,
pic_meta=None,
pic_paths=None,
pic_w=None,
pic_h=None,
multifurc=None,
branch_width=2,
branch_color='DimGray',
scale = 1000,
html = None
):
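        # Illustrative usage (a sketch; the qualifier name and colours are examples only,
        # and the output folder is assumed to exist):
        #
        #     pj.annotate('figs', 'mid', 'mid',
        #                 ['source_organism'],
        #                 node_support_dict={'black': (100, 99), 'gray': (99, 75)})
        #
        # renders one PNG per tree under figs/, rooting each tree at its midpoint.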
stdout = sys.stdout
if html:
sys.stdout = open(html, 'wt')
print '<html>'
ts = TreeStyle()
ts.show_leaf_name = False
ts.scale = scale
if node_support_dict:
ts.legend_position=1
ts.legend.add_face(TextFace('Node support: ', fsize=10), column=0)
i = 1
for color in sorted(node_support_dict.items(),key=lambda i: i[1][0], reverse=True):
ts.legend.add_face(CircleFace(radius = 4, color = color[0]), column=i)
i +=1
ts.legend.add_face(TextFace(' '+str(node_support_dict[color[0]][0])+'-'+str(node_support_dict[color[0]][1]),
fsize=10), column=i)
i += 1
for tree in self.trees.keys():
# set outgroup leaves, labels and label colors
outgroup_list = []
all_heatmap_profile_values = []
leaves_for_heatmap = []
for leaf in self.trees[tree][0]:
qualifiers_dictionary = get_qualifiers_dictionary(self, leaf.feature_id)
leaf_label = ''
for meta in leaf_labels_txt_meta:
leaf_label += qualifiers_dictionary[meta]+' '
leaf_label = leaf_label[:-1]
fgcolor = 'black'
if leaf_label_colors:
for colour_name in leaf_label_colors.keys():
if colour_name in qualifiers_dictionary[leaf_node_color_meta]:
fgcolor = leaf_label_colors[colour_name]
leaf_face = TextFace(leaf_label, fgcolor=fgcolor, ftype=ftype, fsize=fsize)
leaf.add_face(leaf_face,0)
if not root_value == 'mid' and root_meta in qualifiers_dictionary.keys() and root_value in qualifiers_dictionary[root_meta]:
outgroup_list.append(leaf)
if heat_map_meta:
include = True
for i in heat_map_meta:
if not i in qualifiers_dictionary:
include = False
if include:
profile = []
deviation = []
for meta in heat_map_meta:
if meta in qualifiers_dictionary.keys():
profile.append(float(qualifiers_dictionary[meta]))
all_heatmap_profile_values.append(float(qualifiers_dictionary[meta]))
deviation.append(0.0)
leaf.add_features(profile=profile)
leaf.add_features(deviation=deviation)
leaves_for_heatmap.append(leaf)
if pic_meta:
leaf_value = qualifiers_dictionary[pic_meta]
if leaf_value in pic_paths:
pic_face = ImgFace(pic_paths[leaf_value], width=pic_w, height=pic_h)
leaf.add_face(pic_face, 1)
for leaf in leaves_for_heatmap:
leaf.add_face(ProfileFace(max_v=float(max(all_heatmap_profile_values)),
min_v=float(min(all_heatmap_profile_values)),
center_v=float(float(max(all_heatmap_profile_values)+min(all_heatmap_profile_values))/2),
width=50, height=30,
style='heatmap',
colorscheme=heat_map_colour_scheme),
column=1, position="aligned")
#set outgroup
if outgroup_list == ['mid']:
try:
R = self.trees[tree][0].get_midpoint_outgroup()
self.trees[tree][0].set_outgroup(R)
print 'rooting tree '+tree+' at midpoint'
except:
print 'root in '+tree+' already set correctly?'
elif len(outgroup_list) == 1:
try:
self.trees[tree][0].set_outgroup(outgroup_list[0])
except:
print 'root in '+tree+' already set correctly?'
elif len(outgroup_list) > 1:
try:
R = self.trees[tree][0].get_common_ancestor(outgroup_list)
self.trees[tree][0].set_outgroup(R)
except:
print 'root in '+tree+' already set correctly?'
elif len(outgroup_list)==0:
try:
R = self.trees[tree][0].get_midpoint_outgroup()
self.trees[tree][0].set_outgroup(R)
print 'rooting tree '+tree+' at midpoint'
except:
print 'root in '+tree+' already set correctly?'
# ladderize
self.trees[tree][0].ladderize()
ns = NodeStyle()
ns['size'] = 0
ns['fgcolor'] = 'black'
ns['vt_line_width'] = branch_width
ns['hz_line_width'] = branch_width
ns['hz_line_color'] = branch_color
ns['vt_line_color'] = branch_color
for n in self.trees[tree][0].traverse():
n.set_style(ns)
self.trees[tree][0].set_style(ns)
if multifurc:
for n in self.trees[tree][0].traverse():
if n.support < multifurc and not n.is_leaf():
n.delete()
# node bg colors
if node_bg_color:
for key in node_bg_color.keys():
for node in self.trees[tree][0].get_monophyletic(values=[key], target_attr=node_bg_meta):
ns = NodeStyle(bgcolor=node_bg_color[key])
ns['size']=0
ns['fgcolor']='black'
ns['vt_line_width'] = branch_width
ns['hz_line_width'] = branch_width
ns['hz_line_color'] = branch_color
ns['vt_line_color'] = branch_color
node.set_style(ns)
# node support
if node_support_dict:
for node in self.trees[tree][0].traverse():
for key in node_support_dict.keys():
if (node.support <= node_support_dict[key][0] and
node.support > node_support_dict[key][1]):
node.add_face(CircleFace(radius = support_bullet_size,
color = key),column=0, position = "float")
self.trees[tree][0].render(fig_folder + "/"+self.trees[tree][0].get_leaves()[0].tree_method_id+'.png',w=1000, tree_style=ts)
print('<A href='+
fig_folder + "/" + self.trees[tree][0].get_leaves()[0].tree_method_id+'.png'+
'>'+self.trees[tree][0].get_leaves()[0].tree_method_id+
'</A><BR>')
print '</html>'
print fig_folder
sys.stdout = stdout
def trim(self, list_of_Conf_objects, cutoff=0):
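        # Illustrative usage (a sketch; `gappyout_conf` stands for a TrimalConf object
        # configured elsewhere in this module):
        #
        #     pj.trim([gappyout_conf])
        #     print(pj.trimmed_alignments.keys())   # keys look like 'locus@aln_method@trim_method'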
for m in list_of_Conf_objects:
m.timeit.append(time.time())
m.platform = platform_report()
m.platform.append('Program and version: '+
os.popen(m.cmd + ' --version').readlines()[1].rstrip())
m.platform.append('Program reference: '+
'Salvador Capella-Gutierrez; Jose M. Silla-Martinez; Toni Gabaldon. trimAl: '+
'a tool for automated alignment trimming in large-scale '+
'phylogenetic analyses. Bioinformatics 2009 25: 1972-1973.')
# In this part, I need to take what I can out of the if isinstance
# to facilitate addition of new Conf objects. It can be done like this
# but there will be redundancy
if isinstance(m, TrimalConf):
import subprocess as sub
for aln in m.command_lines.keys():
p = sub.Popen(m.command_lines[aln], shell=True, stdout=sub.PIPE)
stdout, stderr = p.communicate()
#stdout = os.system(m.command_lines[aln]).stdout
alphabet = IUPAC.ambiguous_dna
locus_name = aln.split('@')[0]
for locus in self.loci:
if locus.name == locus_name and locus.char_type == 'prot':
alphabet = IUPAC.protein
align = AlignIO.read(StringIO(stdout), "fasta", alphabet=alphabet)
[summary_lines, num_lines, num_undeter, num_collapsed_aln_seqs] = aln_summary(align,
cutoff=cutoff)
summary = 'Alignment name: '+aln+'\n'
for line in summary_lines:
summary += line+'\n'
if num_lines < 4:
line = 'Alignment %s has less than 4 sequences and will be dropped'%aln
print line
summary += line+'\n'
elif num_undeter[0] > 0:
line = 'Alignment %s has undetermined sequences (%i bp or less) which will be dropped: %s'%(aln, cutoff+1, num_undeter[1])
print line
summary += line+'\n'
records_wo_undeter = []
for record in align:
if not record.id in num_undeter[1]:
records_wo_undeter.append(record)
align = MultipleSeqAlignment(records_wo_undeter)
self.trimmed_alignments[aln] = align
elif num_collapsed_aln_seqs < 4:
line = 'Alignment %s has less than 4 unique sequences and will be dropped'%aln
print line
summary += line+'\n'
else:
self.trimmed_alignments[aln] = align
self.aln_summaries.append(summary)
for file_name in os.listdir(os.curdir):
if m.id.partition('_')[0] in file_name:
os.remove(file_name)
m.timeit.append(time.time())
m.timeit.append(m.timeit[2]-m.timeit[1])
self.used_methods[m.method_name] = m
if self.pickle_name:
pickle_pj(self, self.pickle_name)
if __builtin__.git and self.pickle_name:
comment = ''
for m in list_of_Conf_objects:
comment += '%s\n'%(str(m))
import rpgit
out, err = rpgit.gitAdd(self.pickle_name)
undate_git_log(self, out, err)
cwd = os.getcwd()
import fnmatch
matches = []
for root, dirnames, filenames in os.walk(cwd):
for filename in fnmatch.filter(filenames, '*.py'):
matches.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
matches.append(os.path.join(root, filename))
for match in matches:
out, err = rpgit.gitAdd(match)
undate_git_log(self, out, err)
out, err = rpgit.gitCommit(comment)
undate_git_log(self, out, err)
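    # A minimal usage sketch for trim() (illustrative only; 'pj' is an existing
    # Project and the trimAl settings shown are just the gappyout default):
    # gappyout = TrimalConf(pj, method_name='gappyout', trimal_commands=dict(gappyout=True))
    # pj.trim([gappyout], cutoff=0)
    # pj.trimmed_alignments.keys()  # e.g. ['coi@mafftDefault@gappyout', ...]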
def report_seq_stats(self):
if len(self.records_by_locus.keys())>0:
            # This will make a list of sequence lengths for each locus. Lengths are
            # calculated from record.seq in 'pj.records_by_locus', a dict with locus
            # names as keys and lists of SeqRecord objects as values
lengths_dict = {}
for locus_name in self.records_by_locus.keys():
lengths_dict[locus_name] = []
for record in self.records_by_locus[locus_name]:
lengths_dict[locus_name].append(len(record.seq))
print "Distribution of sequence lengths".title()
draw_boxplot(lengths_dict, 'Seq length (bp)', 'inline')
for stat in ['GC_content']:#, 'nuc_degen_prop', 'prot_degen_prop']:
title = 'Distribution of sequence statistic \"'+stat+'\"'
print title.title()
# This will make a dict with loci as keys and a list of stat values as
# dict values.
stat_dict = {}
                ylabel = 'GC content (%)'
if not stat == 'GC_content':
ylabel = 'Ambiguous positions (prop)'
for locus_name in self.records_by_locus.keys():
stat_dict[locus_name] = []
for i in self.records_by_locus[locus_name]:
for record in self.records:
for feature in record.features:
if feature.qualifiers['feature_id'][0] == i.id:
if stat in feature.qualifiers.keys():
stat_dict[locus_name].append(float(feature.qualifiers[stat][0]))
draw_boxplot(stat_dict, ylabel, 'inline')
##################################
# Project methods to fetch objects
##################################
def ft(self, token):
"""
Will fetch the tree object which has the token in
its key, as long as there is only one
"""
# check how many tree keys match the token
keys = [key for key in self.trees.keys() if token in key]
if len(keys) > 1:
raise IOError("The token %s was found in more then one tree key: %s"
%(token, str(keys)))
elif len(keys) == 0:
raise IOError("The token %s was not found in any tree key"
%token)
elif len(keys) == 1:
print "returning tree object %s"%keys[0]
return Tree(self.trees[keys[0]][1])
def fa(self, token):
"""
Will fetch the alignment object which has the token in
its key, as long as there is only one
"""
from StringIO import StringIO
# check how many aln keys match the token
keys = [key for key in self.alignments.keys() if token in key]
if len(keys) > 1:
raise IOError("The token %s was found in more then one alignment key: %s"
%(token, str(keys)))
elif len(keys) == 0:
raise IOError("The token %s was not found in any alignment key"
%token)
elif len(keys) == 1:
print "returning alignment object %s"%keys[0]
return AlignIO.read(StringIO(self.alignments[keys[0]].format('fasta')), 'fasta')
def fta(self, token):
"""
Will fetch the trimmed alignment object which has the token in
its key, as long as there is only one
"""
from StringIO import StringIO
# check how many trimmed aln keys match the token
keys = [key for key in self.trimmed_alignments.keys() if token in key]
if len(keys) > 1:
raise IOError("The token %s was found in more then one trimmed alignment key: %s"
%(token, str(keys)))
elif len(keys) == 0:
raise IOError("The token %s was not found in any trimmed alignment key"
%token)
elif len(keys) == 1:
print "returning trimmed alignment object %s"%keys[0]
return AlignIO.read(StringIO(self.trimmed_alignments[keys[0]].format('fasta')), 'fasta')
def fr(self, locus_name, filter=None):
"""
Will fetch the record objects of the specified locus,
as long as there is at least one.
filter should be a list of lists. Every (sub)list is
a pair of qualifier and value. If filter is specified,
only records that have all the specified values in the
specified qualifiers will be kept.
"""
# check how many record keys match the token
keys = [key for key in self.records_by_locus.keys() if locus_name in key]
if len(keys) > 1:
raise IOError("The locus name %s fits more then one locus: %s"
%(locus_name, str(keys)))
elif len(keys) == 0:
raise IOError("The locus %s was not found"
%locus_name)
elif len(keys) == 1:
records = []
if filter:
for r in self.records_by_locus[keys[0]]:
qualifiers = get_qualifiers_dictionary(self, r.id)
get = True
for f in filter:
if not (f[0] in qualifiers.keys() and qualifiers[f[0]] == f[1]):
get = False
if get:
records.append(r)
else:
for r in self.records_by_locus[keys[0]]:
records.append(r)
print "returning records list of locus %s and filter %s"%(keys[0], str(filter))
return records
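    # Illustrative calls to the fetch helpers above (ft, fa, fta, fr). The key
    # tokens follow the '<locus>@<alignment>@<trimming>@<tree>' naming scheme and
    # are hypothetical, as is the qualifier/value filter:
    # t = pj.ft('coi@mafftDefault@gappyout@fa')        # ete Tree object
    # aln = pj.fa('coi@mafftDefault')                  # MultipleSeqAlignment
    # trimmed = pj.fta('coi@mafftDefault@gappyout')    # trimmed MultipleSeqAlignment
    # recs = pj.fr('coi', filter=[['source_organism', 'Homo sapiens']])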
def propagate_metadata(self):
for t in self.trees.keys():
for l in self.trees[t][0]:
feature_id = l.feature_id
record_id = feature_id.rpartition('_')[0]
record = [r for r in self.records if r.id == record_id][0]
annotations = record.annotations
source_qualifiers = [f for f in record.features if f.type == 'source'][0].qualifiers
feature_qualifiers = [f for f in record.features if f.qualifiers['feature_id'][0] == feature_id][0].qualifiers
for i in annotations:
l.add_feature("annotations_%s"%i,type_to_single_line_str(annotations[i]))
for i in source_qualifiers:
l.add_feature("source_%s"%i,type_to_single_line_str(source_qualifiers[i]))
for i in feature_qualifiers:
l.add_feature(i,type_to_single_line_str(feature_qualifiers[i]))
self.trees[t][1] = self.trees[t][0].write(features=[])
##############################################################################################
if False:
"""Tools for loci explorations in a GenBank File"""
##############################################################################################
programspath = ""
def list_loci_in_genbank(genbank_filename, control_filename, loci_report = None):
"""
Takes a genbank file, returns a loci csv file and a list of loci and their counts. The latter goes
either to stdout or to a file.
>>> list_loci_in_genbank("test-data/test.gb", "test-data/temp_loci.csv", loci_report = "test-data/temp_loci.txt")
>>> assert open("test-data/temp_loci.csv",'r').read() == open("test-data/test_loci.csv",'r').read()
>>> import os
>>> os.remove("test-data/temp_loci.csv")
"""
stdout = sys.stdout
if loci_report:
sys.stdout = open(loci_report, 'w')
genbank_synonyms = gb_syn.gb_syn()
# Open GenBank file
MelPCgenes = open(genbank_filename, 'rU')
gene_dict = {} #set up a gene_dict dictionary
# For each record
for record in SeqIO.parse(MelPCgenes, 'genbank') :
# Look at all features for this record
for feature in record.features:
# If it's a CDS or rRNA...
if feature.type == 'CDS' or feature.type == 'rRNA':
# If it contains some attribute called 'gene' save that
if 'gene' in feature.qualifiers:
geneName = feature.qualifiers['gene'][0]
geneName = geneName.replace(',','_')
geneName = geneName.replace('/','_')
if feature.type+','+geneName in gene_dict:
gene_dict[feature.type+','+geneName]+=1
else:
gene_dict[feature.type+','+geneName]=1
#print(geneName)
# Else if it contains a 'product' qualifier
elif 'product' in feature.qualifiers:
geneName = feature.qualifiers['product'][0]
geneName = geneName.replace(',','_')
geneName = geneName.replace('/','_')
if feature.type+','+geneName in gene_dict:
gene_dict[feature.type+','+geneName]+=1
else:
gene_dict[feature.type+','+geneName]=1
#print(geneName)
else:
print 'Could not find either gene or product in '+record.id
#print feature.qualifiers
#sorting happens via a list
sorted_gene_names = gene_dict.items()
sorted_gene_names.sort(key = lambda i: i[0].lower())
control_file_lines = {}
print('\n' + "There are " + str(len(sorted_gene_names)) + " gene names (or gene product names) detected")
print("----------------------------------")
print("Gene and count sorted by gene name")
print("----------------------------------")
for key, value in sorted_gene_names:
#print key, value
print(str(value) +" instances of " + key)
feature_type = key.split(',')[0]
alias = key.split(',')[1]
gen_group = None
for group in genbank_synonyms:
if alias in group:
gen_group = group
if gen_group:
if gen_group[0].replace(' ','_') in control_file_lines.keys():
control_file_lines[gen_group[0].replace(' ','_')].append(alias)
else:
control_file_lines[gen_group[0].replace(' ','_')] = [feature_type, alias]
else:
name = alias.replace(' ','_').replace('/','_')
control_file_lines[name] = [feature_type, alias]
control_file_handle = open(control_filename, 'wt')
for line in sort(control_file_lines.keys()):
control_file_handle.write('dna,%s,%s'%(control_file_lines[line][0],line))
for a in control_file_lines[line][1:]:
control_file_handle.write(',%s'%a)
control_file_handle.write('\n')
control_file_handle.close()
print("-------------------------------")
print("Gene and count sorted by counts")
print("-------------------------------")
sorted_gene_names.sort(key = lambda i: int(i[1]), reverse=True)
for key, value in sorted_gene_names:
#print key, value
print(str(value) +" instances of " + key)
sys.stdout = stdout
return
AlnConfMethodsSection=\
"""\n
==============================
Core Methods section sentence:
==============================
The dataset(s) %s were aligned using the program %s [1].
Reference:
%s
"""
AlnConfPalNalMethodsSection=\
"""\n
==============================
Core Methods section sentence:
==============================
The dataset(s) %s were first aligned at the protein level using the program %s [1].
The resulting alignments served as guides to codon-align the DNA sequences using %s [2].
Reference:
[1]%s
[2]%s
"""
# ALIGNMENT PROGRAM PLUG
# AS ABOVE, A STRING IS NEEDED WITH PLACE HOLDERS FOR THE LOCI, THE PROGRAM+VERSION
# AND FOR THE REFERENCE.
# ANOTHER STRING IS REQUIRED THAT ALSO INCLUDES PAL2NAL
##############################################################################################
class AlnConf:
##############################################################################################
"""
>>> coi = Locus('dna', 'CDS', 'coi', ['cox1','COX1','coi','COI','CoI'])
>>> lsu = Locus('dna', 'rRNA', '28S', ['28s','28S','LSU rRNA','28S ribosomal RNA','28S large subunit ribosomal RNA'])
>>> pj = Project([coi, lsu], git=False)
# cline_str = muscle = AlnConf(pj, method_name='MuscleDefaults',
# cmd='muscle', program_name='muscle',
# cline_args=dict())
"""
def __init__(self, pj, method_name='mafftDefault', CDSAlign=True, codontable=1, program_name='mafft',
cmd='mafft', loci='all',
cline_args=dict()):
if pj.records_by_locus == {}:
pj.extract_by_locus()
self.id = str(random.randint(10000,99999))+str(time.time())
self.method_name=method_name
self.CDSAlign=CDSAlign
self.program_name=program_name
self.loci = pj.loci
if not loci == 'all':
self.loci = []
for locus_name in loci:
for locus in pj.loci:
if locus_name == locus.name:
self.loci.append(locus)
mutable_loci_list = []
removed_loci = []
for locus in self.loci:
if len(pj.records_by_locus[locus.name]) < 4:
removed_loci.append(locus.name)
else:
mutable_loci_list.append(locus)
if len(removed_loci) > 0:
print "These loci have less than 4 sequences and will be dropped from this conf object. Don't use them in a concatenation:\n%s\n\n"%removed_loci
self.loci = mutable_loci_list
self.CDS_proteins = {}
self.CDS_in_frame = {}
self.codontable = codontable
self.aln_input_strings = {}
self.command_lines = {}
self.timeit = [time.asctime()]
self.platform = []
self.cline_args = cline_args
self.cmd = cmd
if not program_name == 'mafft' and cmd == 'mafft':
self.cmd = pj.defaults[program_name]
elif program_name == 'mafft' and cmd != 'mafft' and not 'mafft' in cmd:
self.cmd = pj.defaults['mafft']
        # make default input files
for key in pj.records_by_locus.keys():
if key in [l.name for l in self.loci]:
SeqIO.write(pj.records_by_locus[key], self.id+'_'+key+'.fasta', 'fasta')
for locus in self.loci:
# put default input file filename and string in the AlnConf object
input_filename=self.id+'_'+locus.name+'.fasta'
self.aln_input_strings[locus.name] = [open(input_filename,'r').read()]
# If CDS and CDSAlign, prepare reference protein input file and in frame CDS input file
if locus.feature_type == 'CDS' and locus.char_type == 'dna' and self.CDSAlign:
self.CDS_proteins[locus.name] = []
self.CDS_in_frame[locus.name] = []
for record in pj.records:
for feature in record.features:
if (feature.type == 'CDS' and 'gene' in feature.qualifiers.keys() and
feature.qualifiers['gene'][0] in locus.aliases and
feature.qualifiers['feature_id'][0] in [f.id for f in pj.records_by_locus[locus.name]]):
S = feature.extract(record.seq)
# Make in-frame CDS input file seq start in frame
                            if 'codon_start' in feature.qualifiers.keys():
                                # codon_start is stored as a string; cast before comparing
                                i = int(feature.qualifiers['codon_start'][0])
                                if i > 1:
                                    S = S[i-1:]
# Make in-frame CDS input file seq end in frame
if len(S)%3 == 1:
S = S[:-1]
elif len(S)%3 == 2:
S = S[:-2]
# make protein input file seq
if not 'translation' in feature.qualifiers.keys():
raise IOError("Feature %s has no 'translation' qualifier"%
feature.qualifiers['feature_id'][0])
P = Seq(feature.qualifiers['translation'][0], IUPAC.protein)
# Remove 3' positions that are based on partial codons
while len(P)*3 > len(S):
P = P[:-1]
# remove complete termination codon
if (len(S)/3)-1 == len(P):
S = S[:-3]
# make in frame cds record
feature_record = SeqRecord(seq = S, id = feature.qualifiers['feature_id'][0],
description = '')
# put it in the object
self.CDS_in_frame[locus.name].append(feature_record)
# make protein record
feature_record = SeqRecord(seq = P, id = feature.qualifiers['feature_id'][0],
description = '')
# Put the protein records in the AlnConf object
self.CDS_proteins[locus.name].append(feature_record)
# check same number of prot and cds objects
if len(pj.records_by_locus[locus.name]) > len(self.CDS_proteins[locus.name]):
                raise RuntimeError('For the CDS locus '+locus.name+': more nuc seqs than prot seqs.'+
                                   ' You may be missing a \'translation\' or \'gene\' qualifier in some of '+
                                   'the features.')
            elif len(pj.records_by_locus[locus.name]) < len(self.CDS_proteins[locus.name]):
                raise RuntimeError('For the CDS locus '+locus.name+': fewer nuc seqs than prot seqs.'+
                                   ' You may be missing a \'translation\' or \'gene\' qualifier in some of '+
                                   'the features.')
unmatched = []
for record in self.CDS_in_frame[locus.name]:
for prot in self.CDS_proteins[locus.name]:
if prot.id == record.id:
if not len(prot.seq)*3 == len(record.seq):
unmatched.append(record.id)
unmatched_string = ''
if len(unmatched) > 0:
for u in unmatched:
unmatched_string += u+' '
raise RuntimeError('The following CDS/protein pairs are unmatched: '+unmatched_string)
SeqIO.write(self.CDS_in_frame[locus.name],
self.id+'_CDS_in_frame_'+locus.name+'.fasta', 'fasta')
input_filename2=self.id+'_CDS_in_frame_'+locus.name+'.fasta'
SeqIO.write(self.CDS_proteins[locus.name],
self.id+'_CDS_proteins_'+locus.name+'.fasta', 'fasta')
input_filename=self.id+'_CDS_proteins_'+locus.name+'.fasta'
self.aln_input_strings[locus.name][0] = [open(input_filename,'r').read(),
open(input_filename2,'r').read()]
cline = dict(dict(input=input_filename), **cline_args)
if self.program_name == 'mafft':
self.command_lines[locus.name] = MafftCommandline(cmd=self.cmd)
elif self.program_name == 'muscle':
self.command_lines[locus.name] = MuscleCommandline(cmd=self.cmd)
# PROGRAM PLUG
# NOTE: MAFFT AND MUSCLE GET FASTA. IF THE NEW PROGRAM GETS SOMETHING
# ELSE, SOME WORK IS REQUIRED ABOVE, e.g. condition to choose the format
# THIS WRITE THE CLINE AND PLACES IN PROJECT.
# elif self.program_name == 'some program':
# self.command_lines[locus.name] = some_program_cline_object(cmd=self.cmd)
# This assumes Bio.Applications style cline object
for c in cline.keys():
self.command_lines[locus.name].__setattr__(c,cline[c])
print str(self.command_lines[locus.name])
def __str__(self):
loci_string = ''
for n in [i.name for i in self.loci]:
loci_string += n+','
loci_string = loci_string[:-1]
command_lines = ''
for i in self.command_lines.keys():
command_lines += i+': '+str(self.command_lines[i])+'\n'
date = str(self.timeit[0])
execution = '[This was not executed yet]'
if len(self.timeit) > 1:
execution = str(self.timeit[3])
plat = '[This was not executed yet]'
if len(self.platform) > 0:
plat = str(self.platform).replace(",",'\n').replace(']','').replace("'",'').replace('[','')
output = str("AlnConf named %s with ID %s\n"+
"Loci: %s \n"+
"Created on: %s\n"+
"Commands:\n"+
"%s\n"+
"Environment:\n"+
"%s\n"+
"execution time:\n"+
"%s\n")%(self.method_name, str(self.id), loci_string, date, command_lines, plat, execution)
if len(self.platform) > 0 and self.CDSAlign:
prog = '[This was not executed yet]'
pal = '[This was not executed yet]'
progref = '[This was not executed yet]'
palref = '[This was not executed yet]'
if self.platform[-1].startswith('Program reference:'):
prog, pal = self.platform[-2].split(':')[1].strip().rstrip().split('\n')
progref, palref = self.platform[-1].split('Program reference:')[1].strip().rstrip().split('\n')
output += AlnConfPalNalMethodsSection%(type_to_single_line_str([l.name for l in self.loci]),
prog, pal, progref, palref)
elif len(self.platform) > 0:
prog = '[This was not executed yet]'
progref = '[This was not executed yet]'
if self.platform[-1].startswith('Program reference:'):
prog = self.platform[-2].split(':')[1].strip().rstrip()
progref = self.platform[-1].split('Program reference:')[1].strip().rstrip()
output += AlnConfMethodsSection%(type_to_single_line_str([l.name for l in self.loci]),
prog, progref)
return output
TrimalConfMethodsSection=\
"""\n
==============================
Core Methods section sentence:
==============================
The alignment(s) %s were trimmed using the program %s [1].
Reference:
%s
"""
##############################################################################################
class TrimalConf:
##############################################################################################
def __init__(self, pj, method_name='gappyout', program_name='trimal',
cmd='default', alns='all', trimal_commands=dict(gappyout=True)):
if len(pj.alignments) == 0:
raise RuntimeError("No sequence alignments found")
self.id = str(random.randint(10000,99999))+str(time.time())
self.method_name=method_name
self.program_name=program_name
self.alignments = pj.alignments
if not alns == 'all':
self.alignments = {}
for aln_name in alns:
if aln_name in pj.alignments.keys():
self.alignments[aln_name] = pj.alignments[aln_name]
self.command_lines = {}
self.timeit = [time.asctime()]
self.platform = []
self.cline_args = trimal_commands
self.cmd = cmd
if cmd == 'default':
self.cmd = pj.defaults['trimal']
        irrelevant = ['out', 'clustal', 'fasta', 'nbrf', 'nexus', 'mega',
                      'phylip3.2', 'phylip', 'sgt', 'scc', 'sct', 'sfc',
                      'sft', 'sident']
for aln in self.alignments:
input_filename = self.id+'_'+aln+'.fasta'
AlignIO.write(self.alignments[aln], input_filename,'fasta')
self.command_lines[aln] = "%s -in %s "%(self.cmd, input_filename)
for kwd in trimal_commands:
                if kwd in irrelevant:
                    warnings.simplefilter('always')
                    warnings.warn("%s is irrelevant in this context and will be ignored"%kwd)
else:
if not trimal_commands[kwd] == None and not trimal_commands[kwd] == False:
if trimal_commands[kwd] == True:
self.command_lines[aln] += "-%s "%(kwd)
else:
self.command_lines[aln] += "-%s %s "%(kwd, trimal_commands[kwd])
self.command_lines[aln+'@'+self.method_name] = self.command_lines[aln][:-1]
print self.command_lines[aln+'@'+self.method_name]
self.command_lines.pop(aln, None)
def __str__(self):
aln_string = ''
for n in self.alignments.keys():
aln_string += n+','
aln_string = aln_string[:-1]
command_lines = ''
for i in self.command_lines.keys():
command_lines += i+': '+str(self.command_lines[i])+'\n'
date = str(self.timeit[0])
execution = '[This was not executed yet]'
if len(self.timeit) > 1:
execution = str(self.timeit[3])
prog = '[This was not executed yet]'
plat = '[This was not executed yet]'
progref = '[This was not executed yet]'
if len(self.platform) > 0:
plat = str(self.platform).replace(",",'\n').replace(']','').replace("'",'').replace('[','')
if self.platform[-1].startswith('Program reference:'):
prog = self.platform[-2].split(':')[1].strip().rstrip()
progref = self.platform[-1].split('Program reference:')[1].strip().rstrip()
output= str("TrimalConf named %s with ID %s\n"+
"Alignments: %s \n"+
"Created on: %s\n"+
"Commands:\n"+
"%s\n"+
"Environment:"+
"%s\n"+
"execution time:\n"+
"%s")%(self.method_name, str(self.id), aln_string, date, command_lines, plat, execution)
output += TrimalConfMethodsSection%(type_to_single_line_str(self.alignments.keys()),
prog, progref)
return output
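# A hedged construction sketch for TrimalConf: keywords given in trimal_commands
# are passed through verbatim as '-<keyword> <value>' trimAl flags, so the 'gt'
# gap-threshold example below only illustrates that mechanism:
# strict = TrimalConf(pj, method_name='gt50', trimal_commands=dict(gt=0.5))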
def use_sh_support_as_branch_support(tree_filename):
string = open(tree_filename,'r').read()
string = re.sub(r'\[',r'[&&NHX:support=',string)
t = Tree(string)
t.dist=0
return t.write(features=[])
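# For example, a newick string with SH-like supports in square brackets, such as
# '(A:0.1,(B:0.2,C:0.3)[0.98]:0.05);', is rewritten by the substitution above to
# '(A:0.1,(B:0.2,C:0.3)[&&NHX:support=0.98]:0.05);' before being parsed as NHX.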
def transfer_support_same_topo(tree_file_with_support,
tree_file_without_support):
supported = Tree(tree_file_with_support)
unsupported = Tree(tree_file_without_support)
supported_leaf_names = sorted(supported.get_leaf_names())
unsupported_leaf_names = sorted(unsupported.get_leaf_names())
if not len(unsupported_leaf_names) == len(supported_leaf_names):
raise IOError(tree_file_with_support + ' and ' + tree_file_without_support +
' are not the same length')
for i in range(len(supported_leaf_names)):
if not supported_leaf_names[i] == unsupported_leaf_names[i]:
raise IOError('The trees do not share all leaves or leaf names')
same_root = supported.get_leaf_names()[0]
unsupported.set_outgroup(same_root)
supported.set_outgroup(same_root)
for ns in supported.traverse():
ns_leaves = ns.get_leaf_names()
if not unsupported.check_monophyly(values=ns_leaves, target_attr="name"):
raise RuntimeError('trees do not share topology and/or all the leaf names')
else:
unsupported_ancestor = unsupported.get_common_ancestor(ns_leaves)
unsupported_ancestor.support = ns.support
unsupported.write(outfile = tree_file_without_support)
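# Minimal usage sketch (file names are hypothetical). Both files must hold the
# same leaves and topology; supports are copied node by node and the second file
# is overwritten in place:
# transfer_support_same_topo('ml_tree_with_support.nwk', 'same_topo_no_support.nwk')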
def make_raxml_partfile(tree_method, pj, trimmed_alignment_name):
concatenation = None
for c in pj.concatenations:
if c.name == trimmed_alignment_name:
concatenation = c
#concatenation = filter(lambda concatenation: concatenation.name == trimmed_alignment_name, pj.concatenations)[0]
model = []
for locus in concatenation.loci:
part_name = None
for trm_aln in concatenation.used_trimmed_alns.keys():
if locus.name == trm_aln.partition('@')[0]:
part_name = trm_aln
if not part_name:
warnings.warn('There is no trimmed alignment for locus '+locus.name+' in concatenation '+concatenation.name)
else:
part_length = concatenation.used_trimmed_alns[part_name]
if locus.char_type == 'prot':
m = None
if isinstance(tree_method.matrix,dict):
m = tree_method.matrix[locus.name]
elif isinstance(tree_method.matrix,str):
m = tree_method.matrix
else:
                    # the matrix must be either a single string or a dict keyed by locus name
                    raise RuntimeError('RaxmlConf matrix must be a string, or a dict with locus names as keys')
model.append([m,part_name,part_length])
elif locus.char_type == 'dna':
model.append(['DNA',part_name,part_length])
# make partition file
partfile = open(tree_method.id+'_'+concatenation.name+'_partfile','wt')
i = 1
for m in model:
partfile.write(m[0]+', '+m[1]+'='+str(i)+'-'+str(m[2]+i-1)+'\n')
i += m[2]
partfile.close()
return tree_method.id+'_'+concatenation.name+'_partfile'
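# The partition file written above follows the RAxML '-q' format, one line per
# locus in the concatenation, e.g. (names and coordinates are illustrative):
# DNA, coi@mafftDefault@gappyout=1-658
# DNA, 28S@mafftDefault@gappyout=659-1890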
def make_raxml_input_matrix_file(tree_method, trimmed_alignment_name):
SeqIO.write(tree_method.trimmed_alignments[trimmed_alignment_name],
tree_method.id+'_'+trimmed_alignment_name+'.fasta','fasta')
return tree_method.id+'_'+trimmed_alignment_name+'.fasta'
def write_raxml_clines(tree_method, pj, trimmed_alignment_name):
cline_que = 0
support_replicates = 100
ML_replicates = 1
if '-N' in tree_method.cline_args.keys():
ML_replicates = tree_method.cline_args['-N']
if '-#' in tree_method.cline_args.keys():
support_replicates = tree_method.cline_args['-#']
partfile = None
# Check if it is a concatenation and make partfile
for c in pj.concatenations:
if c.name == trimmed_alignment_name.partition('@')[0]:
partfile = make_raxml_partfile(tree_method, pj, trimmed_alignment_name)
input_filename = make_raxml_input_matrix_file(tree_method, trimmed_alignment_name)
model = tree_method.model
try:
locus_char_type = filter(lambda locus: locus.name == trimmed_alignment_name.partition('@')[0], pj.loci)[0].char_type
except:
locus_char_type = 'prot'
if partfile:
model='PROT'+model+'JTT'
else:
if locus_char_type == 'dna':
model = 'GTR'+tree_method.model
elif locus_char_type == 'prot':
if isinstance(tree_method.matrix,str):
model = 'PROT'+tree_method.model+tree_method.matrix
elif isinstance(tree_method.matrix,dict):
model = 'PROT'+tree_method.model+tree_method.matrix[trimmed_alignment_name]
presets = {'fa': [{'-f': 'a',
'-p': random.randint(99,999),
'-x': random.randint(99,999),
'-s': input_filename,
'-N': support_replicates,
'-n': tree_method.id+'_'+trimmed_alignment_name+'0',
'-m': model}
],
'fD_fb':[{'-f': 'D',
'-p': random.randint(99,999),
'-s': input_filename,
'-N': ML_replicates,
'-n': tree_method.id+'_'+trimmed_alignment_name+'0',
'-m': model},{'-f': 'b',
'-p': random.randint(99,999),
'-s': input_filename,
'-n': tree_method.id+'_'+trimmed_alignment_name+'1',
'-m': model,
'-t': 'RAxML_bestTree.'+tree_method.id+'_'+trimmed_alignment_name+'0',
'-z': 'RAxML_rellBootstrap.'+tree_method.id+'_'+trimmed_alignment_name+'0'}
],
'fd_b_fb':[{'-f': 'd',
'-p': random.randint(99,999),
'-s': input_filename,
'-N': ML_replicates,
'-n': tree_method.id+'_'+trimmed_alignment_name+'0',
'-m': model},{
'-p': random.randint(99,999),
'-b': random.randint(99,999),
'-s': input_filename,
'-#': support_replicates,
'-n': tree_method.id+'_'+trimmed_alignment_name+'1',
'-m': model,
'-T': tree_method.threads},
{'-f': 'b',
'-p': random.randint(99,999),
'-s': input_filename,
'-n': tree_method.id+'_'+trimmed_alignment_name+'2',
'-m': model,
'-t': 'RAxML_bestTree.'+tree_method.id+'_'+trimmed_alignment_name+'0',
'-z': 'RAxML_bootstrap.'+tree_method.id+'_'+trimmed_alignment_name+'1'}
],
'fF_fJ': [{'-f': 'F',
'-p': random.randint(99,999),
'-s': input_filename,
'-n': tree_method.id+'_'+trimmed_alignment_name+'0',
'-m': model},{'-f': 'J',
'-t': 'RAxML_fastTree.'+tree_method.id+'_'+trimmed_alignment_name+'0',
'-p': random.randint(99,999),
'-s': input_filename,
'-n': tree_method.id+'_'+trimmed_alignment_name+'1',
'-m': model}],
'fd_fJ': [{'-f': 'd',
'-p': random.randint(99,999),
'-s': input_filename,
'-N': ML_replicates,
'-n': tree_method.id+'_'+trimmed_alignment_name+'0',
'-m': model},{'-f': 'J',
'-t': 'RAxML_bestTree.'+tree_method.id+'_'+trimmed_alignment_name+'0',
'-p': random.randint(99,999),
'-s': input_filename,
'-n': tree_method.id+'_'+trimmed_alignment_name+'1',
'-m': model}]
}
if 'PTHREADS' in tree_method.cmd:
if tree_method.threads < 2:
tree_method.threads = 2
warnings.warn('raxmlHPC-PTHREADS requires at least 2 threads. Setting threads to 2')
for preset in presets:
for c in presets[preset]:
c['-T'] = tree_method.threads
else:
if tree_method.threads > 1:
            raise RuntimeWarning('This is a serial raxmlHPC binary but more than one thread was requested. '+
                                 'PTHREADS executables have to have an explicit filename, e.g., raxmlHPC-PTHREADS-SSE3')
if partfile:
for preset in presets.keys():
for cline in range(len(presets[preset])):
presets[preset][cline] = dict({'-q': partfile}, **presets[preset][cline])
return presets[tree_method.preset]
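# With the default 'fa' preset the first (and only) command line resolves to
# something along these lines (ID, alignment name and seeds are illustrative):
# raxmlHPC-PTHREADS-SSE3 -f a -p 123 -x 456 -N 100 -T 4 -m GTRGAMMA \
#     -s <id>_coi@mafftDefault@gappyout.fasta -n <id>_coi@mafftDefault@gappyout0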
RaxmlConfMethodsSection=\
"""\n
==============================
Core Methods section sentence:
==============================
Phylogenetic trees were reconstructed from the dataset(s) %s using the program %s [1].
Reference:
%s
"""
##############################################################################################
class RaxmlConf:
##############################################################################################
def __init__(self, pj, method_name='fa', program_name='raxmlHPC-PTHREADS-SSE3', keepfiles=False,
cmd='default', preset = 'fa', alns='all', model='GAMMA', matrix='JTT', threads=4,
cline_args={}):
if len(pj.trimmed_alignments) == 0:
raise RuntimeError("No trimmed sequence alignments found")
self.id = str(random.randint(10000,99999))+str(time.time())
self.method_name=method_name
self.program_name=program_name
self.preset = preset
self.cline_args = cline_args
self.model = model
self.matrix = matrix
self.threads = threads
self.trimmed_alignments = pj.trimmed_alignments
if not alns == 'all':
self.trimmed_alignments = {}
for aln_name in alns:
if aln_name in pj.trimmed_alignments.keys():
self.trimmed_alignments[aln_name] = pj.trimmed_alignments[aln_name]
self.aln_input_strings = {}
self.command_lines = {}
self.timeit = [time.asctime()]
self.platform = []
self.cmd = cmd
self.keepfiles = keepfiles
if cmd == 'default':
self.cmd = pj.defaults['raxmlHPC']
for trimmed_alignment in self.trimmed_alignments.keys():
self.command_lines[trimmed_alignment] = []
command_lines = write_raxml_clines(self, pj, trimmed_alignment)
for command_line in command_lines:
cline_object = RaxmlCommandline(cmd=self.cmd)
for c in command_line.keys():
cline_object.__setattr__(c,command_line[c])
self.command_lines[trimmed_alignment].append(cline_object)
print str(cline_object)
def __str__(self):
aln_string = ''
for n in self.trimmed_alignments.keys():
aln_string += n+','
aln_string = aln_string[:-1]
command_lines = ''
for i in self.command_lines.keys():
command_lines += i+':\n'
for k in self.command_lines[i]:
command_lines += str(k) + '\n'
date = str(self.timeit[0])
execution = '[This was not executed yet]'
if len(self.timeit) > 1:
execution = str(self.timeit[3])
prog = '[This was not executed yet]'
plat = '[This was not executed yet]'
progref = '[This was not executed yet]'
if len(self.platform) > 0:
plat = str(self.platform).replace(",",'\n').replace(']','').replace("'",'').replace('[','')
if self.platform[-1].startswith('Program reference:'):
prog = self.platform[-2].split(':')[1].strip().rstrip()
progref = self.platform[-1].split('Program reference:')[1].strip().rstrip()
output = str("RaxmlConf named %s with ID %s\n"+
"Alignments: %s \n"+
"Created on: %s\n"+
"Commands:\n"+
"%s\n"+
"Environment:\n"+
"%s\n"+
"execution time:\n"+
"%s")%(self.method_name, str(self.id), aln_string, date, command_lines, plat, execution)
output += RaxmlConfMethodsSection%(type_to_single_line_str(self.trimmed_alignments.keys()),
prog, progref)
return output
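# A hedged construction sketch; the arguments mirror the __init__ signature above
# and the preset names defined in write_raxml_clines ('fa', 'fD_fb', 'fd_b_fb',
# 'fF_fJ', 'fd_fJ'):
# raxml = RaxmlConf(pj, method_name='fa', preset='fa', alns='all',
#                   model='GAMMA', matrix='JTT', threads=4)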
def make_pb_input_matrix_file(conf_obj, trimmed_alignment_name):
SeqIO.write(conf_obj.trimmed_alignments[trimmed_alignment_name],
conf_obj.id+'_'+trimmed_alignment_name+'.phylip','phylip-relaxed')
return conf_obj.id+'_'+trimmed_alignment_name+'.phylip'
def write_pb_cline(conf_obj, pj, trimmed_alignment):
cline = "%s -d %s"%(conf_obj.cmd, make_pb_input_matrix_file(conf_obj, trimmed_alignment))
for key in conf_obj.cline_args:
kw = key
if key[0] == '-':
kw = key[1:]
cline += " -%s"%str(kw)
if not str(conf_obj.cline_args[key]) == str(True):
cline += " %s"%str(conf_obj.cline_args[key])
cline += " %s_%s"%(conf_obj.id, trimmed_alignment)
return cline
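# With the PbConf defaults below (nchain="2 100 0.1 100", gtr=True, cat=True) the
# resulting command is along these lines (ID and alignment name are illustrative,
# and keyword order depends on dict iteration):
# pb -d <id>_coi@mafftDefault@gappyout.phylip -nchain 2 100 0.1 100 -gtr -cat <id>_coi@mafftDefault@gappyout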
###################################################################################################
class PbConf:
###################################################################################################
def __init__(self, pj, method_name = 'dna_cat_gtr', program_name='phylobayes', keepfiles=True,
cmd='default', alns='all', cline_args=dict(nchain="2 100 0.1 100",
gtr=True,
cat=True)):
self.id = str(random.randint(10000,99999))+str(time.time())
self.method_name=method_name
self.program_name=program_name
self.cline_args = cline_args
self.trimmed_alignments = pj.trimmed_alignments
if not alns == 'all':
self.trimmed_alignments = {}
for aln_name in alns:
if aln_name in pj.trimmed_alignments.keys():
self.trimmed_alignments[aln_name] = pj.trimmed_alignments[aln_name]
self.aln_input_strings = {}
self.command_lines = {}
self.timeit = [time.asctime()]
self.platform = []
self.cmd = cmd
self.keepfiles = keepfiles
if cmd == 'default':
self.cmd = pj.defaults['pb']
for trimmed_alignment in self.trimmed_alignments.keys():
self.command_lines[trimmed_alignment] = [write_pb_cline(self, pj, trimmed_alignment)]
print self.command_lines[trimmed_alignment][0]
def __str__(self):
aln_string = ''
for n in self.trimmed_alignments.keys():
aln_string += n+','
aln_string = aln_string[:-1]
command_lines = ''
for i in self.command_lines.keys():
command_lines += i+': '+str(self.command_lines[i])+'\n'
date = str(self.timeit[0])
execution = '[This was not executed yet]'
if len(self.timeit) > 1:
execution = str(self.timeit[3])
prog = '[This was not executed yet]'
plat = '[This was not executed yet]'
progref = '[This was not executed yet]'
if len(self.platform) > 0:
plat = str(self.platform).replace(",",'\n').replace(']','').replace("'",'').replace('[','')
if self.platform[-1].startswith('Program reference:'):
prog = self.platform[-2].split(':')[1].strip().rstrip()
progref = self.platform[-1].split('Program reference:')[1].strip().rstrip()
output = str("PbConf named %s with ID %s\n"+
"Alignments: %s \n"+
"Created on: %s\n"+
"Commands:\n"+
"%s\n"+
"Environment:\n"+
"%s\n"+
"execution time:\n"+
"%s")%(self.method_name, str(self.id), aln_string, date, command_lines, plat, execution)
output += RaxmlConfMethodsSection%(type_to_single_line_str(self.trimmed_alignments.keys()),
prog, progref)
return output
from pylab import *
import random
def draw_boxplot(dictionary, y_axis_label, figs_folder): #'locus':[values]
import numpy as np
import matplotlib.pyplot as plt
items = dictionary.items()
items.sort()
data = [locus[1] for locus in items]
fig, ax1 = plt.subplots()
fig.set_size_inches(0.3*len(data),10)
plt.subplots_adjust(top=0.99, bottom=0.3)
#bp = plt.boxplot(data, widths=0.75, patch_artist=True)
bp = plt.boxplot(data, patch_artist=True)
for box in bp['boxes']:
# change outline color
box.set( color='black', linewidth=1)
# change fill color
box.set( facecolor = 'red', alpha=0.85 )
# change color, linestyle and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color='gray', linestyle='solid', linewidth=2.0)
# change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color='gray', linewidth=2.0)
# change color and linewidth of the medians
for median in bp['medians']:
#median.set(color='#b2df8a', linewidth=2)
median.set(color='white', linewidth=2)
# change the style of fliers and their fill
for flier in bp['fliers']:
flier.set(marker='o', color='#e7298a', alpha=0.5)
# Add a light horizontal grid to the plot
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.7)
# Hide these grid behind plot objects
ax1.set_axisbelow(True)
#set title and axis labels
#ax1.set_title('Sequence length distribution per locus\n', size=18)
xlabels = [locus[0] for locus in items]
xticks(range(1,len(data)+1), xlabels, size=14, rotation='vertical')
#subplots_adjust(left=0.3, bottom=0.8)
ax1.set_ylabel(y_axis_label, size=18)
name = str(random.randint(1000,2000))
if figs_folder=='inline':
fig.show()
else:
fig.savefig(figs_folder + '/' + name +'.png')
close('all')
return figs_folder + '/' + name+'.png'
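# Minimal usage sketch; the loci and values are made up and only show the
# expected {'locus': [values]} input shape and the 'inline' vs folder output modes:
# draw_boxplot({'coi': [650, 658, 655], '28S': [1190, 1204, 1188]}, 'Seq length (bp)', 'inline')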
#################################################################################
def report_methods(pj, figs_folder, output_directory, size='small',
compare_trees=[], compare_meta=None, trees_to_compare='all',
unrooted_trees=False, mp_root=True):
#################################################################################
"""
Main HTML reporting function. This function iterates over the
Project's attributes and generate appropriate html formated report lines.
pj - The Project object
figs_folder - The directory to which tree figures were saved. This is
specified in the annotate Project method
    output_directory - The directory to which this report will be written. It
can be inherited from the publish function which uses
this function.
"""
#========================================================================
# HEAD
#========================================================================
# Checking if 'report_methods' was called by 'publish' in order to infer
# which folders to create and what errors to raise for existing
# directories. If we were called by 'publish' we should allow
# output_directory to exist because 'publish' has just created it.
# 'publish' would have also raised an error if it existed before.
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
callername = calframe[1][3]
print "reporter was called by "+str(callername)
# Here we manage mkdir and error raising for preexisting
# output_directory, based of the caller function
if os.path.isdir(output_directory) and str(callername) != 'publish':
raise RuntimeError('%s already exists'%output_directory)
else:
        # This will make output_directory if it doesn't exist and
# will add a subdirectory 'files' which will store the
# stylesheet file and the figure files
os.makedirs('%s/files'%output_directory)
        # This will contain the tag that points to the stylesheet file
        # (Bootstrap.css). We still need to add something that writes it
        # here, or that copies it from the source directory into /files
css_line = '<link rel="stylesheet" type="text/css" href="Bootstrap.css">'
# This list will contain the report lines/ tables as values. We will append
# each new report line to it
report_lines = ['<html>','<head>',css_line,'<h1>']
# Main report title, will print the time in which the 'Project' object
# was initiated (not the time when the report was made as in previous
# versions.
head = 'reprophylo analysis from '+pj.starttime
report_lines.append(head)
report_lines += ['</h1>','</head>','<body>','']
#========================================================================
# BODY
#========================================================================
############################# section 1: DATA #######################
if pj.user:
report_lines += ['<h2>','User Info','</h2>', '']
for item in pj.user:
report_lines += ['<strong>%s: </strong>'%item[0], str(item[1])]
report_lines += ['']
report_lines += ['<h2>','Data','</h2>', '']
print "starting species table"
# Species over loci table
#------------------------------------------------------------------------
title = 'species representation in sequence data'.title()
report_lines += ('<h3>', title, '</h3>', '')
    # This will write a CSV file of the number of times each locus occurs for each
    # species. The species are sorted in alphabetical order. The CSV will
    # be written to 'outfile_name'. After 'outfile_name' is processed it will
    # be deleted. The CSV will be read with the csv module and the resulting
    # list of lists will be made into a table using HTML and then added to
    # 'report_lines'. The species and gene counts are made by 'species_vs_loci'
    # based on the SeqRecord objects (Biopython) found in pj.records.
# pj.records is a list of SeqRecord objects
outfile_name= str(random.randint(1000,2000))
pj.species_vs_loci(outfile_name)
with open(outfile_name, 'rb') as csvfile:
sp_vs_lc = list(csv.reader(csvfile, delimiter='\t', quotechar='|'))
report_lines += ['',
HTML.table(sp_vs_lc[1:], header_row=sp_vs_lc[0]),
'']
# The following writes a pre text of the same thing
#field_sizes = []
#for i in range(len(sp_vs_lc[0])):
# lengths = []
# for row in sp_vs_lc:
# lengths.append(len(row[i]))
# field_sizes.append(max(lengths))
#for row in sp_vs_lc:
# string = ''
# for i in range(len(row)):
# string += row[i].ljust(field_sizes[i]+3)
# report_lines.append(string)
os.remove(outfile_name)
if not size == 'small':
print "starting sequence statistics plots"
# Sequence statistic plots
#------------------------------------------------------------------------
title = 'Sequence statistic plots'.title()
report_lines += ('<h3>', title, '</h3>', '')
# This will plot 4 box plot figures representing the distribution of seq
# length, GC content, %ambiguity in nuc and prot seqs for each locus.
if len(pj.records_by_locus.keys())>0:
        # This will determine the width of the figure, 0.5 inch per locus
scale = str(len(pj.records_by_locus.keys())*0.5)
        # This will make a list of sequence lengths for each locus. Lengths are
        # calculated from record.seq in 'pj.records_by_locus', a dict with locus
        # names as keys and lists of SeqRecord objects as values
lengths_dict = {}
for locus_name in pj.records_by_locus.keys():
lengths_dict[locus_name] = []
for record in pj.records_by_locus[locus_name]:
lengths_dict[locus_name].append(len(record.seq))
# This draws a box plot of sequence length distributions and puts a png in
# the 'files' directory.
if not size == 'small':
fig_filename = draw_boxplot(lengths_dict, 'Seq length (bp)', '%s/files'%output_directory)
# Distribution of sequence lengths
#---------------------------------------------------------------------
title = 'Distribution of sequence lengths'
report_lines += ( '<h4>', title, '</h4>', '')
# This will write the img tag for the seq length boxplot in the report html
# The src attribute is the png file path. The commented lines are an alternative
# making an embeded figure.
if not size=='small' and os.path.isfile(fig_filename):
#data_uri = open(fig_filename, 'rb').read().encode('base64').replace('\n', '')
#img_tag = '<img height=400 width='+scale+' src="data:image/png;base64,{0}">'.format(data_uri)
img_tag = '<img src="%s">'%(fig_filename.partition('/')[-1])
report_lines.append(img_tag)
#os.remove(fig_filename)
# This will make GC content, nuc_degen_prop and prot_degen_prop png file,
# will put them in the /files subdirectory and will write the html sections
# for them, including img tags. All three params are feature qualifiers of
# SeqRecord objects found in pj.records, which is a list.
stats_to_plot = ('GC_content', 'nuc_degen_prop', 'prot_degen_prop')
if size=='small':
stats_to_plot = ()
for stat in stats_to_plot:
# This will make a dict with loci as keys and a list of stat values as
# dict values.
stat_dict = {}
        ylabel = 'GC content (%)'
if not stat == 'GC_content':
ylabel = 'Ambiguous positions (prop)'
for locus_name in pj.records_by_locus.keys():
stat_dict[locus_name] = []
for i in pj.records_by_locus[locus_name]:
for record in pj.records:
for feature in record.features:
if feature.qualifiers['feature_id'][0] == i.id:
if stat in feature.qualifiers.keys():
stat_dict[locus_name].append(float(feature.qualifiers[stat][0]))
# This will make the boxplot png and will put in in the /files subdirectory
fig_filename = draw_boxplot(stat_dict, ylabel, '%s/files'%output_directory)
# Distribution of stat
#---------------------------------------------------------------------
title = 'Distribution of sequence statistic \"'+stat+'\"'
report_lines += ( '<h4>', title, '</h4>', '')
# This will make the img tag using the png path as src. The commented lines are an alternative
# making an embeded image
if os.path.isfile(fig_filename):
#data_uri = open(fig_filename, 'rb').read().encode('base64').replace('\n', '')
img_tag = '<img src="%s">'%(fig_filename.partition('/')[-1])
#img_tag = '<img height=400 width='+scale+' src="data:image/png;base64,{0}">'.format(data_uri)
report_lines.append(img_tag)
#os.remove(fig_filename)
print "starting concatenations"
# Description of data concatenations
#------------------------------------------------------------------------
title = 'Description of data concatenations'.title()
report_lines += ('<h3>', title, '</h3>', '')
# filter out concatenation objects that were not used to build a concatenation
# by taking only concat names that are in the keys of pj.trimmed_alignments.
    # pj.trimmed_alignments is a dict with alignment names as keys and
    # trimmed alignment objects (Biopython MultipleSeqAlignment) as values.
composed_concatenations = []
for c in pj.concatenations:
if c.name in pj.trimmed_alignments.keys():
composed_concatenations.append(c)
for c in composed_concatenations:
title = ('content of concatenation \"' + c.name + '\"').title()
report_lines += ('<h4>', title, '</h4>', '')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This will write the concatenation attribute
report_lines.append('Rules for \"' + c.name + '\":')
rule_1 = 'OTUs must have the loci: '
for locus in c.otu_must_have_all_of:
rule_1 += locus + ', '
report_lines.append(rule_1)
rule_2 = '<pre>'
for group in c.otu_must_have_one_of:
rule_2 += '</pre>OTUs must have at least one of the following loci: \n<pre>'
rule_2 += str(group).replace('[','').replace(']','') +'</pre>\n'
report_lines += (rule_2, '')
        # These are the OTU and locus names in the concatenation. c.feature_id_dict
        # is a dict with the OTUs as keys and dicts as values. Each of these dicts
        # has the loci as keys and the feature id as value
otus = c.feature_id_dict.keys()
loci = [locus.name for locus in c.loci]
# This is the table's header
table_lines = [['','']+[locus.name for locus in c.loci]]
# Commented lines are an alternative way to write the table as
# pre text
#otus_max_length = max([len(i) for i in otus])+33
#loci_columns_max_length = []
#for locus in loci:
# lengths = [len(locus)]
# for otu in otus:
# if locus in c.feature_id_dict[otu].keys():
# lengths.append(len(c.feature_id_dict[otu][locus]))
# else:
# lengths.append(0)
# loci_columns_max_length.append(max(lengths)+3)
#concat_header = ''.ljust(otus_max_length)
#for i in range(len(loci)):
# concat_header += loci[i].ljust(loci_columns_max_length[i])
#report_lines += (concat_header, '~'*len(concat_header))
# This will write the table
for otu in otus:
otu_species = ''
for locus in loci:
if locus in c.feature_id_dict[otu].keys():
feature_qualifiers = get_qualifiers_dictionary(pj, c.feature_id_dict[otu][locus])
if 'source_organism' in feature_qualifiers.keys():
otu_species = feature_qualifiers['source_organism']
otu_line = [otu, otu_species]
#concat_line = (otu+' '+otu_species).ljust(otus_max_length)
for i in range(len(loci)):
if loci[i] in c.feature_id_dict[otu].keys():
#concat_line += c.feature_id_dict[otu][loci[i]].ljust(loci_columns_max_length[i])
otu_line.append(c.feature_id_dict[otu][loci[i]])
else:
#concat_line += ''.ljust(loci_columns_max_length[i])
otu_line.append('')
table_lines.append(otu_line)
#report_lines.append(concat_line)
report_lines.append(HTML.table(table_lines[1:], header_row=table_lines[0]))
############################# section 2: METHODS #######################
    # This section prints some attributes of each of the 'Conf' objects used.
    # The 'Conf' objects are found in pj.used_methods (a dict keyed by method name).
    # In an unpickled 'Project' object, the 'Conf' objects are replaced by string
    # representations of their attributes because the objects themselves do not
    # pickle well. The formatting of these representations when they are printed
    # still needs some beautification. Also, I plan a 'revive_methods' func to turn
    # them back to 'Conf' objects that can be rerun.
report_lines += ['', '<h2>','Methods','</h2>', '']
print "starting methods"
for method in pj.used_methods:
# This will print list representations of the 'Conf' objects
title = str(pj.used_methods[method]).split('\n')[0]
report_lines += ('', '<h4>', title, '</h4>','')
report_lines += ('<pre>',str(pj.used_methods[method]),'</pre>')
report_lines += ['','']
############################# section 3: RESULTS #######################
report_lines += ['', '<h2>','Results','</h2>', '']
print "starting alignment statistics"
# Global alignmnet statistics
#------------------------------------------------------------------------
    title = 'Global alignment statistics'.title()
report_lines += ('<h3>', title, '</h3>', '')
# This prints things like num of unique seqs and num of parsimony informative
# cloumns. Takes the info from 'pj.aln_summaries' which is a list of strings.
# Alignment length: 1566
#Number of rows: 94
#Unique sequences: 87
#Average gap prop.: 0.488445
#Variable columns: 1045
#Parsimony informative: 402
#Undetermined sequences: 0
if len(pj.aln_summaries)>0:
report_lines += [('<pre>Name=Alignment name\n'+
'NumPos=Alignment length\n'+
'NumSeq=Number of sequences\n'+
'Unique=Number of unique sequences\n'+
'GapProp=Average gap proportion\n'+
'VarCols=Total variable positions\n'+
'ParsInf=Parsimony informative positions\n'+
'UnSeqs=Undetermined sequences (mostly/only gaps)\n'+
'UnSeqsCutoff=Length cutoff which defines undetermined\n</pre>')]
T = [['Name','NumPos','NumSeq','Unique','GapProp','VarCols','ParsInf','UnSeqs','UnSeqsCutoff']]
comments = []
for summary in pj.aln_summaries:
line = []
for i in summary.splitlines():
try:
line.append(i.split(': ')[1])
except:
comments.append(i)
T.append(line)
report_lines += ['',
HTML.table(T[1:], header_row=T[0]),
'','<pre>']+comments+['</pre>']
else:
report_lines += ['','No sequence alignments in this Project','']
# Per position alignmnet statistics
#------------------------------------------------------------------------
    title = 'per position alignment statistics'.title()
report_lines += ('<h3>', title, '</h3>', '')
if len(pj.alignments.keys())>0 and not size == 'small':
title = 'Alignment statistics before trimming'
report_lines += ('', '<h4>', title, '</h4>', '')
report_lines += ['<h4>','Trimal\'s Residue Similarity Score (-scc)','</h4>', '']
# draw_trimal_scc(project, num_plots_in_raw, output_dir...
# 'trimmed' determines if it will analyse trimmed or raw alignments
# alignments are taken from pj.alignments or pj.trimmed_alignments which are dictionaries
# with alignment names as keys and lists containing an aln object and al string as values
# scc on raw alignments
fig_file = draw_trimal_scc(pj, 2, '%s/files'%output_directory, trimmed=False)
if os.path.isfile(fig_file):
#data_uri = open(fig_file, 'rb').read().encode('base64').replace('\n', '')
#img_tag = '<img src="data:image/png;base64,{0}">'.format(data_uri)
img_tag = '<img src="%s">'%(fig_file.partition('/')[-1])
report_lines.append(img_tag)
#os.remove(fig_file)
        report_lines += ['<h4>','Trimal\'s column gap score (-sgc)','</h4>', '']
# sgc on raw alignments
fig_file = draw_trimal_scc(pj, 2, '%s/files'%output_directory, trimmed=False, alg='-sgc')
if os.path.isfile(fig_file):
#data_uri = open(fig_file, 'rb').read().encode('base64').replace('\n', '')
#img_tag = '<img src="data:image/png;base64,{0}">'.format(data_uri)
img_tag = '<img src="%s">'%(fig_file.partition('/')[-1])
report_lines.append(img_tag)
#os.remove(fig_file)
else:
report_lines += ['No alignments or too many alignments in this project','']
if len(pj.trimmed_alignments.keys())>0 and not size=='small':
title = 'Alignment statistics after trimming'
report_lines += ('', '<h4>', title, '</h4>', '')
report_lines += ['<h4>','"Trimal\'s Residue Similarity Score (-scc)','</h4>', '']
# scc on trimmed alignments
fig_file = draw_trimal_scc(pj, 2, '%s/files'%output_directory, trimmed=True)
if os.path.isfile(fig_file):
#data_uri = open(fig_file, 'rb').read().encode('base64').replace('\n', '')
#img_tag = '<img src="data:image/png;base64,{0}">'.format(data_uri)
img_tag = '<img src="%s">'%(fig_file.partition('/')[-1])
report_lines.append(img_tag)
#os.remove(fig_file)
        report_lines += ['<h4>','Trimal\'s column gap score (-sgc)','</h4>', '']
# sgc on trimmed alignments
fig_file = draw_trimal_scc(pj, 2, '%s/files'%output_directory, trimmed=True, alg='-sgc')
if os.path.isfile(fig_file):
#data_uri = open(fig_file, 'rb').read().encode('base64').replace('\n', '')
#img_tag = '<img src="data:image/png;base64,{0}">'.format(data_uri)
img_tag = '<img src="%s">'%(fig_file.partition('/')[-1])
report_lines.append(img_tag)
#os.remove(fig_file)
else:
report_lines += ['No trimmed alignments, or too many, in this project','']
print "starting RF matrix(ces)"
for rf_type in compare_trees:
title = 'Robinson-Foulds distances (%s)'%rf_type.title()
report_lines += ('<h3>', title, '</h3>', '')
if len(pj.trees.keys())>1:
try:
RF_filename, legend = calc_rf(pj, '%s/files'%output_directory,
rf_type=rf_type, meta=compare_meta, trees=trees_to_compare,
unrooted_trees=unrooted_trees, mp_root=mp_root)
scale = str(len(legend)*60)
if os.path.isfile(RF_filename):
#data_uri = open(RF_filename, 'rb').read().encode('base64').replace('\n', '')
#img_tag = '<img height='+scale+' width='+scale+' src="data:image/png;base64,{0}">'.format(data_uri)
img_tag = '<img src="%s">'%(RF_filename.partition('/')[-1])
report_lines.append(img_tag)
#os.remove(RF_filename)
                report_lines+=['<h3>Legend</h3>','']
report_lines += [HTML.table( legend[1:],
header_row=legend[0])]
report_lines.append('')
except:
report_lines += ['Skipping RF distance calculation']
else:
report_lines += ['Less than two trees in this Project','']
############################# section 4: TREES #######################
print "reporting trees"
report_lines += ['', '<h2>','Trees','</h2>', '']
for tree in pj.trees.keys():
report_lines += ('<h2>'+tree.split('@')[0]+'</h2>',
'<h3>Alignment method: '+tree.split('@')[1]+'</h3>',
'<h3>Trimming method: '+tree.split('@')[2]+'</h3>',
'<h3>Tree method: '+tree.split('@')[3]+'</h3>',
'<pre style="white-space:normal;">',
'Tree Method ID: '+pj.trees[tree][0].get_leaves()[0].tree_method_id,'</pre>')
report_lines += ('<h3>newick format</h3>','','<pre style="white-space:normal;">',pj.trees[tree][0].write(),'</pre>','')
report_lines += ('<h3>nhx format</h3>','','<pre>',pj.trees[tree][1],'</pre>','','','','')
if os.path.isfile(figs_folder+'/'+pj.trees[tree][0].get_leaves()[0].tree_method_id+'.png'):
origin = figs_folder+'/'+pj.trees[tree][0].get_leaves()[0].tree_method_id+'.png'
dest = '%s/files/%s'%(output_directory, pj.trees[tree][0].get_leaves()[0].tree_method_id+'.png')
shutil.copyfile(origin, dest)
#data_handle = open(figs_folder+'/'+pj.trees[tree][0].get_leaves()[0].tree_method_id+'.png','rb')
#data_uri = data_handle.read().encode('base64').replace('\n', '')
#data_handle.close()
#img_tag = '<img width=500 src="data:image/png;base64,{0}">'.format(data_uri)
img_tag = '<img width=500 src="%s">'%(dest.partition('/')[-1])
report_lines.append(img_tag)
report_lines.append('</body>')
report_lines.append('</html>')
lines = []
for line in report_lines:
if '<' in line:
lines.append(line)
else:
lines.append(line.replace('\n','<br>')+'<br>')
return lines
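# Illustrative call, mirroring how publish() uses this function below
# (folder names are hypothetical):
# lines = report_methods(pj, figs_folder='figs', output_directory='report_dir', size='large')
# report = open('report_dir/report.html', 'wt')
# for line in lines:
#     report.write(line + '\n')
# report.close()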
def pickle_pj(pj, pickle_file_name, track=True):
import os
if os.path.exists(pickle_file_name):
#print "DEBUG pickle_pj: %s"%str(os.stat(pickle_file_name).st_ctime)
#print "DEBUG pickle_pj: %s"%str(os.stat(pickle_file_name).st_size)
os.remove(pickle_file_name)
import cloud.serialization.cloudpickle as pickle
output = open(pickle_file_name,'wb')
pickle.dump(pj, output)
output.close()
#print "DEBUG pickle_pj: %s"%str(os.stat(pickle_file_name).st_ctime)
#print "DEBUG pickle_pj: %s"%str(os.stat(pickle_file_name).st_size)
if __builtin__.git and track:
import rpgit
rpgit.gitAdd(pickle_file_name)
comment = "A pickled Project from %s" % time.asctime()
rpgit.gitCommit(comment)
return pickle_file_name
def unpickle_pj(pickle_file_name, git=True):
import cloud.serialization.cloudpickle as pickle
pickle_handle = open(pickle_file_name, 'rb')
# fix some attr: add git_log if None, turn Confs to strings.
pkl_pj = pickle.pickle.load(pickle_handle)
new_pj = Project(pkl_pj.loci, git=False)
# These do not need fixing
attr_names = [ 'alignments',
'aln_summaries',
'concatenations',
'defaults',
'git_log',
'pickle_name',
'records',
'records_by_locus',
'sets',
'starttime',
'trees',
'trimmed_alignments',
'user',
]
# Move the content into the new pj, Add git_log if missing
for attr_name in attr_names:
try:
setattr(new_pj,attr_name,getattr(pkl_pj,attr_name))
except:
warnings.warn('Upgrading Project to v1')
# upgrade used methods to dict, if list.
if isinstance(pkl_pj.used_methods, list) and len(pkl_pj.used_methods) == 0:
pkl_pj.used_methods = {}
elif isinstance(pkl_pj.used_methods, list) and len(pkl_pj.used_methods) > 0:
temp = {}
for m in pkl_pj.used_methods:
if isinstance(m, basestring):
name = m.split()[1]
temp[name] = m
elif not isinstance(m, basestring):
temp[m.method_name] = m
pkl_pj.used_methods = temp
# Turn Confs to strings
for i in pkl_pj.used_methods:
if isinstance(pkl_pj.used_methods[i], basestring):
new_pj.used_methods[i] = pkl_pj.used_methods[i]
else:
new_pj.used_methods[i] = str(pkl_pj.used_methods[i])
if git:
start_git(new_pj)
return new_pj
def revert_pickle(pj, commit_hash):
pickle_filename = pj.pickle_name
if not os.path.exists(pickle_filename):
        raise RuntimeError('Cannot find %s. Has the pickle been moved?'%pickle_filename)
cmd = 'git checkout %s -- %s'%(commit_hash, pickle_filename)
pipe = sub.Popen(cmd, shell=True, stdout = sub.PIPE,stderr = sub.PIPE )
(out, error) = pipe.communicate()
print 'Git STDOUT: %s'%str(out)
print 'Git STDERR: %s'%str(error)
new_pj = unpickle_pj(pickle_filename)
new_pj.git_log += "<<<<\nThe pickle was reverted to commit %s\nSTDOUT:\n%s\nSTDERR:%s\n>>>>\n"%(commit_hash,
str(out),
str(error))
return new_pj
def publish(pj, folder_name, figures_folder, size='small',
compare_trees=[], compare_meta=None, trees_to_compare='all',
unrooted_trees=False):
import os, time
folder = None
zip_file = None
if folder_name.endswith('.zip'):
zip_file = folder_name
folder = folder_name[:-4]
else:
folder = folder_name
zip_file = folder_name + '.zip'
print "checking if file exists"
if os.path.exists(folder) or os.path.exists(zip_file):
raise IOError(folder_name + ' already exists')
os.makedirs(folder)
pj.write(folder+'/tree_and_alns.xml','phyloxml')
#os.mkdir(folder+'/fasta_alignments')
#file_names = pj.write_alns(id=['feature_id','original_id'])
#for f in file_names:
# os.rename(f, "%s/fasta_alignments/%s"%(folder,f))
#os.mkdir(folder+'/trimmed_fasta_alignments')
#file_names = pj.write_alns(id=['feature_id','original_id'])
#for f in file_names:
# os.rename(f, "%s/trimmed_fasta_alignments/%s"%(folder,f))
from glob import glob
from shutil import copyfile
notebooks = glob('*.ipynb')
for n in notebooks:
copyfile(n, '%s/%s'%(folder,n))
pj.write(folder+'/sequences_and_metadata.gb','genbank')
report = open(folder+'/report.html','wt')
lines = report_methods(pj, figures_folder, folder_name, size,
compare_trees=compare_trees, compare_meta=compare_meta,
trees_to_compare=trees_to_compare, unrooted_trees=unrooted_trees)
for line in lines:
report.write(line + '\n')
report.close()
    #'report_lines' now takes care of putting the figures in the zip folder, within /files
#for tree in pj.trees.keys():
# if os.path.isfile(figures_folder+'/'+pj.trees[tree][0].get_leaves()[0].tree_method_id+'.png'):
# from shutil import copyfile
# copyfile(figures_folder+'/'+pj.trees[tree][0].get_leaves()[0].tree_method_id+'.png',
# folder+'/'+pj.trees[tree][0].get_leaves()[0].tree_method_id+'.png')
print "pickling"
pickle_name = time.strftime("%a_%d_%b_%Y_%X", time.gmtime())+'.pkl'
pickle_pj(pj, folder + '/' + pickle_name)
hndl = open("%s/Bootstrap.css"%folder, 'wt')
hndl.write(css.css())
hndl.close()
import zipfile, shutil
print "archiving"
zf = zipfile.ZipFile(zip_file, "w")
for dirname, subdirs, files in os.walk(folder):
zf.write(dirname)
for filename in files:
zf.write(os.path.join(dirname, filename))
zf.close()
shutil.rmtree(folder)
print "report ready"
##################################################
if False:
""" Robinson Foulds Pairwise tree distances"""
##################################################
# These helpers allow an RF-style distance similar to dendropy's, in which branch lengths
# are considered, but each branch length is also corrected by the total tree length,
# so trees with very different evolutionary rates can still be compared.
def get_tree_length(t):
tree_length = 0
for n in t.traverse():
if not n.dist == 1: # ete puts 1 if there is no blen
tree_length += n.dist
return tree_length
def correct_branch_length_by_tree_length(branch_length, tree_length):
return branch_length/float(tree_length)
def get_corrected_blen_dif(cor_blen1, cor_blen2):
return abs(cor_blen1-cor_blen2)
def get_corrected_blen_dif_s(cor_blen1, cor_blen2):
from math import pow
return pow((cor_blen1-cor_blen2),2)
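# A quick worked example of the normalisation above (numbers are made up): a branch of
# length 0.3 in a tree whose branches sum to 1.5 contributes 0.3/1.5 = 0.2; comparing it
# with a corrected length of 0.15 from another tree gives |0.2 - 0.15| = 0.05, or
# (0.2 - 0.15)**2 = 0.0025 for the squared (Kuhner-Felsenstein style) variant.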
def flatten_to_strings(listOfLists):
"""Flatten a list of (lists of (lists of strings)) for any level
of nesting"""
result = []
for i in listOfLists:
# Only append if i is a basestring (superclass of string)
if isinstance(i, basestring):
result.append(i)
# Otherwise call this function recursively
else:
result.extend(flatten_to_strings(i))
return result
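# Illustrative sketch (hypothetical input): the partitions returned by ete's
# robinson_foulds() can be nested, so they are flattened to plain leaf names before
# being passed to get_common_ancestor():
#
#   flatten_to_strings([['A', 'B'], ['C', ['D', 'E']]])  # -> ['A', 'B', 'C', 'D', 'E']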
def get_corrected_blen_rf(t1, t2, unrooted_trees=False):
"""
>>> t1 = Tree("(A:0.3,(B:0.226,((C:0.784,D:0.159):0.759,(e:0.03,f:0.1):0.25)):0.3):0.1;")
>>> t2 = Tree("(A:0.3,(D:0.226,((C:0.784,B:0.159):0.759,(e:0.03,f:0.1):0.25)):0.3):0.1;")
Trees are the same:
>>> T1 = T2 = t1
>>> get_corrected_blen_rf(T1,T2)
0.0
Trees are different:
>>> get_corrected_blen_rf(t1,t2)
0.504654255319149
Dendropy calc of these trees' RF:
>>> dt1 = dendropy.Tree.get_from_string(t1.write(), schema="newick")
>>> dt2 = dendropy.Tree.get_from_string(t2.write(), schema="newick")
>>> dt1.robinson_foulds_distance(dt2)
3.652
They are the same and blengths are 25% shorter in second tree. This function gives almost 0
>>> t1 = Tree("(A:0.3,(B:0.226,((C:0.784,D:0.159):0.759,(e:0.03,f:0.1):0.25)):0.3):0.1;")
>>> t1_prop = Tree("(A:0.3,(B:0.226,((C:0.784,D:0.159):0.759,(e:0.03,f:0.1):0.25)):0.3):0.1;")
>>> for n in t1_prop.traverse():
... if not n.dist == 1:
... n.dist = 0.75*n.dist
>>> get_corrected_blen_rf(t1,t1_prop)
6.938893903907228e-17
>>> str1 = "(A:1.1,((B:1.1,C:1.1):1.1,(D:1.1,E:1.1):1.1):1.1);"
>>> str2 = "(A:2.2,((B:2.2,C:2.2):2.2,(D:2.2,E:2.2):2.2):2.2);"
>>> get_corrected_blen_rf(Tree(str1), Tree(str2))
0.0
Dendropy gives almost 1 for the same trees:
>>> dt1 = dendropy.Tree.get_from_string(t1.write(), schema="newick")
>>> dt1_prop = dendropy.Tree.get_from_string(t1_prop.write(), schema="newick")
>>> dt1.robinson_foulds_distance(dt1_prop)
0.7269999999999999
So dendropy RF reflects tree length differences, this function cleans them out.
One branch length changes in one tree, topology stays the same:
>>> t1 = Tree("(A:0.3,(B:0.226,((C:0.001,D:0.159):0.759,(e:0.03,f:0.1):0.25)):0.3):0.1;")
>>> dt1 = dendropy.Tree.get_from_string(t1.write(), schema="newick")
>>> get_corrected_blen_rf(t1,t1_prop)
0.23503571001673468
Same trees (same topology, different branch-lengths) in ete's RF:
>>> rf, max_rf, common_leaves, parts_t1, parts_t2 = t1.robinson_foulds(t1_prop)
>>> rf
0
The raw rf calc in ete (the starting point of this func) does not reflect any
branch length differences, proportional or otherwise.
"""
rf, max_rf, common_leaves, parts_t1, parts_t2 = t1.robinson_foulds(t2, unrooted_trees=unrooted_trees)
#print 'DEBUG:', parts_t1
#print 'DEBUG:', parts_t2
tree_length1 = get_tree_length(t1)
tree_length2 = get_tree_length(t2)
distance = 0
for part in parts_t1:
if not part in parts_t2:
raw_blen = t1.get_common_ancestor(flatten_to_strings(part)).dist
if not raw_blen==1.0:
distance += correct_branch_length_by_tree_length(raw_blen, tree_length1)
elif part in parts_t2:
#print 'DEBUG:', part
raw_blen1 = t1.get_common_ancestor(flatten_to_strings(part)).dist
raw_blen2 = t2.get_common_ancestor(flatten_to_strings(part)).dist
if raw_blen1==1:
raw_blen1 = 0
if raw_blen2==1:
raw_blen2 = 0
cor_blen1 = correct_branch_length_by_tree_length(raw_blen1, tree_length1)
cor_blen2 = correct_branch_length_by_tree_length(raw_blen2, tree_length2)
distance += get_corrected_blen_dif(cor_blen1, cor_blen2)
for part in parts_t2:
if not part in parts_t1:
raw_blen = t2.get_common_ancestor(flatten_to_strings(part)).dist
if not raw_blen==1:
distance += correct_branch_length_by_tree_length(raw_blen, tree_length2)
#print "Debug: distance: %s"%str(distance)
return distance
def get_corrected_kuhner_felsenstein(t1, t2, unrooted_trees=False):
"""
>>> t1 = Tree("(A:0.3,(B:0.226,((C:0.784,D:0.159):0.759,(e:0.03,f:0.1):0.25)):0.3):0.1;")
>>> t2 = Tree("(A:0.3,(D:0.226,((C:0.784,B:0.159):0.759,(e:0.03,f:0.1):0.25)):0.3):0.1;")
Trees are the same:
>>> T1 = T2 = t1
>>> get_corrected_kuhner_felsenstein(T1,T2)
0.0
Trees are different:
>>> get_corrected_kuhner_felsenstein(t1,t2)
0.1273379587058624
Dendropy calc of these trees' RF:
>>> dt1 = dendropy.Tree.get_from_string(t1.write(), schema="newick")
>>> dt2 = dendropy.Tree.get_from_string(t2.write(), schema="newick")
>>> dt1.robinson_foulds_distance(dt2)
3.652
They are the same and blengths are 25% shorter in second tree. This function gives almost 0
>>> t1 = Tree("(A:0.3,(B:0.226,((C:0.784,D:0.159):0.759,(e:0.03,f:0.1):0.25)):0.3):0.1;")
>>> t1_prop = Tree("(A:0.3,(B:0.226,((C:0.784,D:0.159):0.759,(e:0.03,f:0.1):0.25)):0.3):0.1;")
>>> for n in t1_prop.traverse():
... if not n.dist == 1:
... n.dist = 0.75*n.dist
>>> get_corrected_kuhner_felsenstein(t1,t1_prop)
3.274080905458301e-33
>>> str1 = "(A:1.1,((B:1.1,C:1.1):1.1,(D:1.1,E:1.1):1.1):1.1);"
>>> str2 = "(A:2.2,((B:2.2,C:2.2):2.2,(D:2.2,E:2.2):2.2):2.2);"
>>> get_corrected_kuhner_felsenstein(Tree(str1), Tree(str2))
0.0
Dendropy gives almost 1 for the same trees:
>>> dt1 = dendropy.Tree.get_from_string(t1.write(), schema="newick")
>>> dt1_prop = dendropy.Tree.get_from_string(t1_prop.write(), schema="newick")
>>> dt1.robinson_foulds_distance(dt1_prop)
0.7269999999999999
So dendropy RF reflects tree length differences, this function cleans them out.
One branch length changes in one tree, topology stays the same:
>>> t1 = Tree("(A:0.3,(B:0.226,((C:0.001,D:0.159):0.759,(e:0.03,f:0.1):0.25)):0.3):0.1;")
>>> dt1 = dendropy.Tree.get_from_string(t1.write(), schema="newick")
>>> get_corrected_kuhner_felsenstein(t1,t1_prop)
0.010930167133307158
Same trees (same topology, different branch-lengths) in ete's RF:
>>> rf, max_rf, common_leaves, parts_t1, parts_t2 = t1.robinson_foulds(t1_prop)
>>> rf
0
The raw rf calc in ete (the starting point of this func) does not reflect any
branch length differences, proportional or otherwise.
"""
from math import pow
#print 'DEBUG: in kuhner_felsenstein'
rf, max_rf, common_leaves, parts_t1, parts_t2 = t1.robinson_foulds(t2, unrooted_trees=unrooted_trees)
#print 'DEBUG:', parts_t1
#print 'DEBUG:', parts_t2
tree_length1 = get_tree_length(t1)
tree_length2 = get_tree_length(t2)
distance = 0
for part in parts_t1:
if not part in parts_t2:
raw_blen = t1.get_common_ancestor(flatten_to_strings(part)).dist
if not raw_blen==1.0:
distance += pow(correct_branch_length_by_tree_length(raw_blen, tree_length1),2)
elif part in parts_t2:
#print 'DEBUG:', part
raw_blen1 = t1.get_common_ancestor(flatten_to_strings(part)).dist
raw_blen2 = t2.get_common_ancestor(flatten_to_strings(part)).dist
if raw_blen1==1:
raw_blen1 = 0
if raw_blen2==1:
raw_blen2 = 0
cor_blen1 = correct_branch_length_by_tree_length(raw_blen1, tree_length1)
cor_blen2 = correct_branch_length_by_tree_length(raw_blen2, tree_length2)
distance += get_corrected_blen_dif_s(cor_blen1, cor_blen2)
for part in parts_t2:
if not part in parts_t1:
raw_blen = t2.get_common_ancestor(flatten_to_strings(part)).dist
if not raw_blen==1:
distance += pow(correct_branch_length_by_tree_length(raw_blen, tree_length2),2)
#print "Debug: distance: %s"%str(distance)
return distance
def calc_rf(pj, figs_folder, rf_type='proportional',meta=None, mp_root=False, trees='all', unrooted_trees=False):
"""
rf_types:
topology: only topological diff
branch-length: branch-length diffs
proportional: branch-length proportion out of tree length diff
deep-more-important: to-do
"""
if len(pj.concatenations) > 0 and not meta:
meta = pj.concatenations[0].otu_meta
elif not meta:
raise RuntimeError('RF calc does not know which meta to use to compare leaves')
if trees == 'all':
trees = pj.trees.keys()
data = []
for t1 in trees:
line = []
dupT1 = Tree(pj.trees[t1][0].write())
for l in dupT1:
for record in pj.records:
for feature in record.features:
if feature.qualifiers['feature_id'][0] == l.name and meta in feature.qualifiers.keys():
l.name = feature.qualifiers[meta][0]
if mp_root:
R=dupT1.get_midpoint_outgroup()
dupT1.set_outgroup(R)
dupT1d = dendropy.Tree.get_from_string(dupT1.write(), schema="newick")
for t2 in trees:
dupT2 = Tree(pj.trees[t2][0].write())
for l in dupT2:
for record in pj.records:
for feature in record.features:
if feature.qualifiers['feature_id'][0] == l.name and meta in feature.qualifiers.keys():
l.name = feature.qualifiers[meta][0]
if mp_root:
R=dupT2.get_midpoint_outgroup()
dupT2.set_outgroup(R)
dupT2d = dendropy.Tree.get_from_string(dupT2.write(), schema="newick")
if rf_type=='branch-length':
rf = dupT1d.robinson_foulds_distance(dupT2d)
line.append(rf)
elif rf_type=='topology':
rf, max_rf, common_leaves, parts_t1, parts_t2 = dupT1.robinson_foulds(dupT2, unrooted_trees=unrooted_trees)
line.append(rf/float(max_rf))
elif rf_type == 'proportional':
#print 'DEBUG: in proportional'
warnings.warn('proportional branch-distance: Trees must have the same taxa')
line.append(get_corrected_kuhner_felsenstein(dupT1, dupT2, unrooted_trees=unrooted_trees))
#to do
#elif rf_type == deep_nodes more important:
# line.append(get_deep_important_rf(dupT1, dupT2))
data.append(line)
row_labels = [str(i) for i in range(1, len(trees)+1)]
column_labels = row_labels
legend = [['#','LOCUS','ALIGNMENT METHOD','TRIMMING METHOD','TREE METHOD']]
for i in trees:
line = [str(trees.index(i)+1)]
for val in i.split('@'):
line.append(val)
legend.append(line)
fig, ax = plt.subplots()
data = np.array(data)
heatmap = ax.pcolor(data, cmap=plt.cm.Blues)
# put the major ticks at the middle of each cell
ax.set_xticks(np.arange(data.shape[0])+0.5, minor=False)
ax.set_yticks(np.arange(data.shape[1])+0.5, minor=False)
# want a more natural, table-like display
ax.invert_yaxis()
ax.xaxis.tick_top()
ax.set_xticklabels(row_labels, minor=False, size=14, rotation='vertical')
ax.set_yticklabels(column_labels, minor=False, size=14)
#fig.set_size_inches(12.5,12.5)
try:
fig.colorbar(heatmap, cmap=plt.cm.Blues)
except:
pass
name = str(random.randint(1000,2000))
fig.savefig(figs_folder + '/' + name +'.png')
close('all')
return figs_folder + '/' + name+'.png', legend
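# Illustrative usage sketch (the figures folder and metadata qualifier are assumptions):
# write a pairwise RF heatmap for all the trees in the Project and keep the legend rows
# for the report.
#
#   png_path, legend = calc_rf(pj, 'figures', rf_type='topology',
#                              meta='source_organism', unrooted_trees=True)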
def draw_trimal_scc(pj, num_col, figs_folder, trimmed=False, alg = '-scc'):
import pandas as pd
import matplotlib.pyplot as plt
import random, os
from Bio import AlignIO
# get the alignment objects
#-------------------------#
alignments = pj.alignments.items()
if trimmed:
alignments = pj.trimmed_alignments.items()
num_alns = len(alignments)
subplots_arrangement = []
#-----------------------#
num_rows = round(float(num_alns)/num_col)
if num_rows < float(num_alns)/num_col:
num_rows += 1
fig = plt.figure(figsize=(10*num_col,2.3*num_rows), dpi=80, frameon = False)
plt.subplots_adjust(hspace = 0.8)
subplots_arrangement += [num_rows, num_col]
#Calc with trimal and plot
#------------------------#
for i in range(1,num_alns+1):
import subprocess as sub
subplot_position = subplots_arrangement +[i]
aln_name = alignments[i-1][0]
aln_obj = alignments[i-1][1]
name = str(random.randint(1000,2000))+'_'+aln_name+'_for_trimal_graph.fasta'
AlignIO.write(aln_obj, name, 'fasta')
stderr = open('stderr','wt')
#stdout = os.popen(programpath+'trimal '+alg+' -in '+name)#.read()
stdout = sub.Popen(programspath+"trimal "+alg+" -in " + name,
shell=True, stdout=sub.PIPE, stderr=stderr).stdout
stderr.close()
var = pd.read_table(stdout, sep='\t+', skiprows=3, engine='python')
os.remove('stderr')
if alg == '-scc':
var.columns = ['position', 'variability']
elif alg == '-sgc':
var.columns = ['position', 'pct_gaps', 'gap_score']
#Plot residue similarity figure, for nucleotides this is identity value
fig.add_subplot(subplot_position[0], subplot_position[1], subplot_position[2])
if alg == '-scc':
var.variability.plot(color='g',lw=2)
elif alg == '-sgc':
var.pct_gaps.plot(color='g',lw=2)
plt.title(aln_name.replace('@',' '), fontsize=14)
plt.axis([1,len(aln_obj[0].seq), 0, 1.1]) #0 to 1 scale for y axis
xlab = "alignment position"
ylab = "similarity score"
if alg == '-sgc':
ylab = "percent gaps"
plt.axis([1, len(aln_obj[0].seq), 0, 110]) #0 to 100 scale for y axis, ie percent
plt.xlabel(xlab, fontsize=10)
plt.ylabel(ylab, fontsize=10)
plt.grid(True)
os.remove(name)
figname = str(random.randint(1000,2000))
fig.savefig(figs_folder + '/' + figname +'.png')
plt.close('all')
return figs_folder + '/' + figname+'.png'
def view_csv_as_table(csv_filename, delimiter, quotechar='|'):
with open(csv_filename, 'rb') as csvfile:
sp_vs_lc = list(csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar))
field_sizes = []
for i in range(len(sp_vs_lc[0])):
lengths = []
for row in sp_vs_lc:
lengths.append(len(row[i]))
field_sizes.append(max(lengths))
for row in sp_vs_lc:
string = ''
for i in range(len(row)):
string += row[i].ljust(field_sizes[i]+3)
print string
def rfmt_tree_for_and_char_matirx_bayestraits(pj, qual_list, rootmeta, rootvalue, treefile=None,
treetoken=None, treeburnin=0, treestep=1, treeformat=5):
if treefile and treetoken:
raise IOError("Only specify treefile ot treetoken, not both")
T = None
if treefile:
T = open(treefile,'r').readlines()
T = T[:-1]
elif treetoken:
T = [pj.ft(treetoken).write()]
else:
raise IOError("Specify treefile or treetoken")
leaf_names = Tree(T[0].rstrip(), format=treeformat).get_leaf_names()
translate = []
i = 1
for n in leaf_names:
translate.append([n,str(i)])
i +=1
char_matrix = ""
for n in leaf_names:
line = "%s "%n
quals = get_qualifiers_dictionary(pj, n)
for qual in qual_list:
if qual in quals.keys():
line += "%s "%str(quals[qual])
else:
line += "- "
line = line[:-1]+'\n'
char_matrix += line
reformatted_tree = """#NEXUS
Begin Trees;
TRANSLATE
"""
for t in translate[:-1]:
reformatted_tree += '\t\t'+t[1]+'\t'+t[0]+',\n'
reformatted_tree += '\t\t'+translate[-1][1]+'\t'+translate[-1][0]+';\n'
for i in range(int(len(T)*treeburnin),len(T),treestep):
j = T[i]
newick = Tree(j.rstrip(), format=treeformat)
brlns = []
for n in newick.traverse():
brlns.append(n.dist)
if sorted(brlns)[2] == 0.0: # three 0 length branches - too much
pass
else:
R = None
count = 0
for l in newick:
if get_qualifiers_dictionary(pj, l.name)[rootmeta] == rootvalue:
R = l.name
count += 1
if not count == 1:
raise RuntimeError("%s does not exist or not unique in qualifier %s"%(rootvalue, rootmeta))
newick.set_outgroup(newick&R)
newick_str = newick.write(format=5)
for t in translate:
newick_str = re.sub(t[0],t[1],newick_str)
reformatted_tree += 'Tree tree'+str(i)+'= '+newick_str+'\n'
reformatted_tree += 'End;\n'
return reformatted_tree, char_matrix
# Exonerate
def bayestraits(pj, qual_list, rootmeta, rootvalue,
treefile=None, treetoken=None,
treeburnin=0, treestep=1,
treeformat=5,
bayestraits = 'BayesTraits',
commands = [4,1,'kappa','delta','lambda','run']):
# make command file
import random
rand = random.randint(1000000,9999999)
cfile = open(str(rand),'wt')
for i in commands:
cfile.write(str(i)+'\n')
cfile.close()
reformatted_tree, char_matrix = rfmt_tree_for_and_char_matirx_bayestraits(pj, qual_list, rootmeta, rootvalue, treefile=treefile,
treetoken=treetoken, treeburnin=treeburnin, treestep=treestep,
treeformat=treeformat)
tfile = open(str(rand)+'.nex','wt')
tfile.write(reformatted_tree)
tfile.close()
mfile = open(str(rand)+'.txt','wt')
mfile.write(char_matrix)
mfile.close()
cline = "%s %s %s < %s" %(bayestraits, str(rand)+'.nex', str(rand)+'.txt', str(rand))
import os
stdout = os.popen(cline).read()
os.remove(str(rand))
os.remove(str(rand)+'.nex')
os.remove(str(rand)+'.txt')
return stdout
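# Illustrative usage sketch (qualifier names, root value and tree file are assumptions,
# and BayesTraits is assumed to be on the PATH): reformat a posterior tree sample plus a
# character matrix and pipe them through BayesTraits.
#
#   out = bayestraits(pj, ['habitat', 'body_length'], 'source_organism', 'Homo sapiens',
#                     treefile='posterior_sample.t', treeburnin=0.25, treestep=10)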
# Exonerate
class ExonerateCommandLine:
"""cline object with execute methods"""
def __init__(self,
q, #query filename
t, #target filename
path='exonerate',
Q="unknown",# query alphabet
T="unknown", # target alphabet
querychunkid=0, #query job number
targetchunkid=0, #target job number
querychunktotal=0, #Num of queries
targetchunktotal=0, #Num of targets
E="FALSE", #exhaustive search
B="FALSE", #rapid comparison between long seqs
forcescan="none", #Force FSM scan on query or target sequences q or t
saturatethreshold=0, #word saturation threshold
customserver="NULL", # Custom command to send non-standard server
fastasuffix=".fa", #Fasta file suffix filter (in subdirectories)
m="ungapped",
s=100, #Score threshold for gapped alignment
percent=0.0, #Percent self-score threshold
showalignment="TRUE",
showsugar="FALSE",
showcigar="FALSE",
showvulgar="FALSE",
showquerygff="FALSE", # Include GFF output on query in results
showtargetgff="FALSE", #Include GFF output on target in results
ryo="NULL", #Roll-your-own printf-esque output format
n=0, #Report best N results per query
S="TRUE", #Search for suboptimal alignments
g="TRUE", #Use gapped extension
refine="none", #none|full|region
refineboundary=32, #Refinement region boundary
D=32, #Maximum memory to use for DP tracebacks (Mb)
C="TRUE", #Use compiled viterbi implementations
terminalrangeint=12, #Internal terminal range
terminalrangeext=12, #External terminal range
joinrangeint=12, #Internal join range
joinrangeext=12, #External join range
x=50, #Gapped extension threshold
singlepass="TRUE", #Generate suboptimal alignment in a single pass
joinfilter=0, #BSDP join filter threshold
A="none", #Path to sequence annotation file
softmaskquery="FALSE", #Allow softmasking on the query sequence
softmasktarget="FALSE", #Allow softmasking on the target sequence
d="nucleic", #DNA substitution matrix
p="blosum62", #Protein substitution matrix
M=64, #Memory limit for FSM scanning <Mb>
forcefsm="none", #Force FSM type ( normal | compact )
wordjump=1, #Jump between query words
o=-12, #Affine gap open penalty
e=-4, #Affine gap extend penalty
codongapopen=-18, #Codon affine gap open penalty
codongapextend=-8, #Codon affine gap extend penalty
minner=10, #Minimum NER length
maxner=50000, #Maximum NER length
neropen=-20, #NER open penalty
minintron=30, #Minimum intron length
maxintron=20000, #Maximum intron length
i=-30, #Intron Opening penalty
f=-28, #Frameshift creation penalty
useaatla="TRUE", #useaatla
geneticcode=1, #Use built-in or custom genetic code
hspfilter=0, #Aggressive HSP filtering level
useworddropoff="TRUE", #Use word neighbourhood dropoff
seedrepeat=1, #Seeds per diagonal required for HSP seeding
dnawordlen=12, #Wordlength for DNA words
proteinwordlen=6, #Wordlength for protein words
codonwordlen=12, #Wordlength for codon words
dnahspdropoff=30, #DNA HSP dropoff score
proteinhspdropoff=20, #Protein HSP dropoff score
codonhspdropoff=40, #Codon HSP dropoff score
dnahspthreshold=75, #DNA HSP threshold score
proteinhspthreshold=30, #Protein HSP threshold score
codonhspthreshold=50, #Codon HSP threshold score
dnawordlimit=0, #Score limit for dna word neighbourhood
proteinwordlimit=4, #Score limit for protein word neighbourhood
codonwordlimit=4, #Score limit for codon word neighbourhood
geneseed=0, #Geneseed Threshold
geneseedrepeat=3, #Seeds per diagonal required for geneseed HSP seeding
alignmentwidth=80, #Alignment display width
forwardcoordinates="TRUE", #Report all coordinates on the forward strand
quality=0, #HSP quality threshold
splice3="primate", #Supply frequency matrix for 3' splice sites
splice5="primate", #Supply frequency matrix for 5' splice sites
forcegtag="FALSE"): #Force use of gt...ag splice sites
import inspect
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
cline = values['path']+' '
for k in ('path','frame','inspect', 'self'):
del values[k]
for keyward in values:
dash = '-'
if len(keyward) > 1:
dash = '--'
cline += dash+keyward+' '+str(values[keyward])+' '
self.cline_string = cline
self.stdout = None
def __str__(self):
return self.cline_string
def execute(self):
import subprocess as sub
p = sub.Popen(self.cline_string, shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
self.stdout = p.communicate()[0]
return self.stdout
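# Illustrative usage sketch (file names are assumptions): build and run an exonerate
# command line; any keyword not overridden keeps the defaults listed above.
#
#   cline = ExonerateCommandLine('query_prot.fasta', 'target_genome.fasta',
#                                m='protein2genome', showvulgar='TRUE')
#   print cline            # the full command string
#   raw = cline.execute()  # captured stdout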
# an all inclusive exonerate ryo format
roll = ("STARTRYOqfull = %qas @!!@!!@ qcds = %qcs @!!@!!@ qid = %qi @!!@!!@ qdescription = %qd @!!@!!@ "+
"qlen = %qal @!!@!!@ qstrand = %qS @!!@!!@ qtype = %qt @!!@!!@ qbegin = %qab @!!@!!@ qend = %qae @!!@!!@ "+
"tfull = %tas @!!@!!@ tcds = %tcs @!!@!!@ tid = %ti @!!@!!@ tdescription = %td @!!@!!@ "+
"tlen = %tal @!!@!!@ tstrand = %tS @!!@!!@ ttype = %tt @!!@!!@ tbegin = %tab @!!@!!@ tend = %tae @!!@!!@ "+
"Etotal = %et @!!@!!@ Eident = %ei @!!@!!@ Esim = %es @!!@!!@ Emis = %em @!!@!!@ Pident = %pi @!!@!!@ "+
"Psim = %ps @!!@!!@ score = %s @!!@!!@ model = %m @!!@!!@ vulgar = %VENDRYO")
def parse_ryo(exo_results):
"""parses exonerate results that include the ryo line above"""
stats = []
ryos = [i.split("ENDRYO")[0] for i in exo_results.split('STARTRYO')[1:]][:-1]
if len(ryos) > 0:
for i in ryos:
a = {}
for line in i.split('@!!@!!@'):
k, v = [line.partition('=')[0], line.partition('=')[2]]
a[k.strip().rstrip()] = v.strip().rstrip()
a['qfull'] = a['qfull'].replace('\n','').replace("\s",'')
a['qcds'] = a['qcds'].replace('\n','').replace("\s",'')
a['tfull'] = a['tfull'].replace('\n','').replace("\s",'')
a['tcds'] = a['tcds'].replace('\n','').replace("\s",'')
stats.append(a)
return stats
def exonerate(q, d, **kwargs):
"""Will run exonerate with the ryo format above.
Returns a dictionary with the ryo line content and the raw output as string"""
if 'ryo' in kwargs.keys():
kwargs.pop('ryo', None)
results = ''
exoCline = ExonerateCommandLine(q, d, ryo="\"%s\""%roll, **kwargs)
results = exoCline.execute()
#print results
stats = parse_ryo(results)
#print stats
return stats, results
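# Illustrative usage sketch (file names are assumptions): the wrapper injects the ryo
# format above, so parse_ryo() can split every hit into a dict of named fields.
#
#   stats, raw = exonerate('query_prot.fasta', 'target_genome.fasta', m='protein2genome')
#   for hit in stats:
#       print hit['qid'], hit['tid'], hit['score']
#   exonerate_ryo_to_gb('query_prot.fasta', 'target_genome.fasta', stats, raw)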
def exonerate_ryo_to_gb(q, d, stats, results, get_query=False):
"""takes the parsed ryo (stats) and the raw output (results) and
builds a gb file for reprophylo"""
if get_query:
raise RuntimeError('get_query=True: currently only parses target. maybe Bio.SearchIO can help?')
gencode = int(results.split('geneticcode ')[1].split()[0])
model = stats[0]['model']
if not model == 'protein2genome:local':
raise RuntimeError("only tested with the protein2genome model")
#gencode = '1'
tfile = d
if '/' in d:
tfile = d.split('/')[-1]
matches = stats
b = 0
records = []
    # Making a seq id that is short enough, yet unique and informative, is a challenge.
    # The approach here is to take the first and last 4 chars of the input file name
    # and to add a serial number for each seq.
    # We add a random three-digit number at the start
    # because the eight file-name chars are not always unique across files.
from random import randint
rnd = randint(111,999)
for match in matches:
ID = ("%i|%s|%s|%i"%(rnd, tfile[:4],tfile[-4:],b)).replace('.','')
r = SeqRecord(seq=Seq(match['tfull'],
alphabet=IUPAC.unambiguous_dna),
id = ID,
description = "query: %s %s, target: %s %s, HSPID: %i"%(match['qid'],
match['qdescription'],
match['tid'],
match['tdescription'],
b))
b += 1
source = SeqFeature(FeatureLocation(0, len(r.seq)), type='source')
source.qualifiers['file'] = [tfile]
for key in match:
source.qualifiers[key] = [match[key]]
r.features.append(source)
vulgar = match['vulgar'].split()
features = [vulgar[i:i+3] for i in range(0, len(vulgar) - 2, 3)]
pos = 0
CDS = None
for feature in features:
ftype = feature[0]
flength = int(feature[2])
f = SeqFeature(FeatureLocation(pos, pos+flength), type = ftype)
f.qualifiers['gene'] = [match['qid']]
pos += flength
r.features.append(f)
coding_locations = []
for j in r.features:
if j.type == 'M' or j.type == 'S':
coding_locations.append(j.location)
elif j.type == 'G' and (int(j.location.start) < int(j.location.end)):
coding_locations.append(j.location)
coding_locations = sorted(coding_locations, key = lambda l: int(l.start))
if len(coding_locations) == 1:
CDS = SeqFeature(coding_locations[0], type='CDS')
else:
CDS = SeqFeature(CompoundLocation(coding_locations), type='CDS')
CDS.qualifiers['gene'] = [match['qid']]
get = False
CDS.qualifiers['translation'] = [str(CDS.extract(r.seq).translate(table=gencode)).replace('*','X')]
if CDS.qualifiers['translation'][0] == str(Seq(match['tcds'].replace(' ','').replace('\n',''),
alphabet=IUPAC.ambiguous_dna).translate(table=gencode)).replace('*','X'):
get = True
else:
CDS.qualifiers['translation'] = ['something went wrong']
print "DEBUG bad CDS"
print match['tid']
#print len(CDS.extract(r.seq)), len(match['tcds'].replace(' ',''))
#print str(CDS.extract(r.seq))[:20], str(CDS.extract(r.seq))[-20:]
#print match['tcds'].replace(' ','')[:20], match['tcds'].replace(' ','')[-20:]
print (str(CDS.extract(r.seq)) == match['tcds'].replace(' ',''))
if (str(CDS.extract(r.seq)) == match['tcds'].replace(' ','')):
                print 'CDS retrieved correctly but biopython could not translate'
else:
                print 'Error in retrieved CDS (CDS built from vulgar does not match the CDS from ryo %tcs)'
#print str(CDS.extract(r.seq))
if get:
r.features.append(CDS)
records.append(r)
a = SeqIO.write(records,'%s.gb'%d,'genbank')
return "%i in %s.gb"%(a, d)
def report_aln_col_stat(pj, loci_names, num_col, figs_folder, trimmed=False, alg = '-scc'):
import pandas as pd
import matplotlib.pyplot as plt
import random, os
from Bio import AlignIO
# get the alignment objects
#-------------------------#
alignments = pj.alignments.items()
if trimmed:
alignments = pj.trimmed_alignments.items()
alignments = [i for i in alignments if i[0].split('@')[0] in loci_names]
num_alns = len(alignments)
subplots_arrangement = []
#-----------------------#
num_rows = round(float(num_alns)/num_col)
if num_rows < float(num_alns)/num_col:
num_rows += 1
fig = plt.figure(figsize=(10*num_col,2.3*num_rows), dpi=80, frameon = False)
plt.subplots_adjust(hspace = 0.8)
subplots_arrangement += [num_rows, num_col]
#Calc with trimal and plot
#------------------------#
for i in range(1,num_alns+1):
import subprocess as sub
subplot_position = subplots_arrangement +[i]
aln_name = alignments[i-1][0]
aln_obj = alignments[i-1][1]
name = str(random.randint(1000,2000))+'_'+aln_name+'_for_trimal_graph.fasta'
AlignIO.write(aln_obj, name, 'fasta')
stderr = open('stderr','wt')
#stdout = os.popen('trimal '+alg+' -in '+name)#.read()
stdout = sub.Popen("trimal "+alg+" -in " + name,
shell=True, stdout=sub.PIPE, stderr=stderr).stdout
stderr.close()
var = pd.read_table(stdout, sep='\t+', skiprows=3, engine='python')
os.remove('stderr')
if alg == '-scc':
var.columns = ['position', 'variability']
elif alg == '-sgc':
var.columns = ['position', 'pct_gaps', 'gap_score']
#Plot residue similarity figure, for nucleotides this is identity value
fig.add_subplot(subplot_position[0], subplot_position[1], subplot_position[2])
if alg == '-scc':
var.variability.plot(color='g',lw=2)
elif alg == '-sgc':
var.pct_gaps.plot(color='g',lw=2)
plt.title(aln_name.replace('@',' '), fontsize=14)
plt.axis([1,len(aln_obj[0].seq), 0, 1.1]) #0 to 1 scale for y axis
xlab = "alignment position"
ylab = "similarity score"
if alg == '-sgc':
ylab = "percent gaps"
plt.axis([1, len(aln_obj[0].seq), 0, 110]) #0 to 100 scale for y axis, ie percent
plt.xlabel(xlab, fontsize=10)
plt.ylabel(ylab, fontsize=10)
plt.grid(True)
os.remove(name)
figname = str(random.randint(1000,2000))
fig.savefig(figs_folder + '/' + figname +'.png')
plt.close('all')
return figs_folder + '/' + figname+'.png'
##############################################################################################################
if False:
""""LociStat class preliminaries"""
##############################################################################################################
def entropy(s, char_type):
"""
    Return the Shannon entropy of an alignment column provided as a string (s),
    given the character type (dna or prot).
    Gaps are ignored; ambiguity codes are ignored.
    homogeneous column
>>> entropy('tttttttt', 'dna')
-0.0
    heterogeneous column, case insensitive
>>> entropy('ttgGaacC', 'dna')
2.0
>>> entropy('ttggaacc', 'dna')
2.0
ignore gaps
>>> entropy('ttgg--Ss', 'dna')
1.0
>>> entropy('ttggSs', 'dna')
1.0
recognize alphabet
>>> entropy('ttggSs', 'prot')
1.584962500721156
"""
missing = None
if char_type == 'prot':
missing = 'Xx'
elif char_type == 'dna':
missing = 'ryswkmbdhvnRYSWKMBDHVN'
s = s.replace('-','').replace('.','').replace('?','')
for m in missing:
s = s.replace(m,'')
p, lns = Counter(s.lower()), float(len(s.lower()))
return -sum( count/lns * math.log(count/lns, 2) for count in p.values())
def gapscore(aln_obj):
"""
    Use TrimAl to get a list of gap scores given a MultipleSeqAlignment
    object.
"""
import pandas as pd
name = str(random.randint(1000,2000))+'_for_trimal_graph.fasta'
AlignIO.write(aln_obj, name, 'fasta')
stderr = open('stderr','wt')
stdout = sub.Popen(programspath+"trimal -sgc -in " + name,
shell=True, stdout=sub.PIPE, stderr=stderr).stdout
stderr.close()
var = pd.read_table(stdout, sep='\t+', skiprows=3, engine='python')
os.remove('stderr')
var.columns = ['position', 'pct_gaps', 'gap_score']
os.remove(name)
return [i[1] for i in var.to_dict()['gap_score'].items()]
def conservation(aln_obj):
"""
    Use TrimAl to get a list of conservation values given a MultipleSeqAlignment
    object.
"""
import pandas as pd
name = str(random.randint(1000,2000))+'_for_trimal_graph.fasta'
AlignIO.write(aln_obj, name, 'fasta')
stderr = open('stderr','wt')
stdout = sub.Popen(programspath+"trimal -scc -in " + name,
shell=True, stdout=sub.PIPE, stderr=stderr).stdout
stderr.close()
var = pd.read_table(stdout, sep='\t+', skiprows=3, engine='python')
os.remove('stderr')
var.columns = ['position', 'conservation']
os.remove(name)
return [i[1] for i in var.to_dict()['conservation'].items()]
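# Illustrative usage sketch (the alignment key is an assumption and TrimAl is assumed to
# be installed): both helpers write a temporary fasta, call TrimAl and return one value
# per alignment column.
#
#   aln = pj.alignments['coi@ReadDirectly']
#   gaps = gapscore(aln)        # TrimAl -sgc gap scores
#   cons = conservation(aln)    # TrimAl -scc conservation scores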
def get_entropies(pj, trimmed = True, alignmnet_method=None, trimming_method=None):
"""
    Return a dictionary with alignment names as keys and lists of per-column entropies as values,
    given the alignments or trimmed alignments dictionary.
    If a locus has more than one alignment in the dictionary, take the specified method
    or the first occurrence.
"""
entropies = {}
aln_dict = None
if trimmed:
aln_dict = pj.trimmed_alignments
if aln_dict == {}:
raise IOError("No trimed alignments in the Project")
elif not trimmed:
aln_dict = pj.alignments
if aln_dict == {}:
raise IOError("No alignments in the Project")
for aln_name in aln_dict.keys():
char_type = None
char_type_list = [l.char_type for l in pj.loci if l.name == aln_name.split('@')[0]]
get = True
if len(char_type_list) == 0:
get = False
warnings.warn('Cannot find Locus for alignment %s. Is it a supermatrix? Skipping.'%aln_name)
elif len(char_type_list) > 1:
if (alignmnet_method and
not aln_name.split('@')[1] == alignmnet_method):
get = False
                warnings.warn('Skipping %s, taking only %s'%(aln_name, alignmnet_method))
elif (trimmed and trimming_method and
not aln_name.split('@')[2] == trimming_method):
get = False
                warnings.warn('Skipping %s, taking only %s'%(aln_name, trimming_method))
elif aln_name.split('@')[0] in [i.split('@')[0] for i in entropies.keys()]:
exists = [i for i in entropies.keys() if i.split('@')[0] == aln_name.split('@')[0]][0]
get = False
                warnings.warn('Skipping %s, already have %s'%(aln_name, exists))
if get:
char_type = char_type_list[0]
aln_obj = aln_dict[aln_name]
entropies[aln_name] =[]
for i in range(aln_obj.get_alignment_length()):
column = aln_obj[:,i]
entropies[aln_name].append(entropy(column, char_type))
return entropies
def get_gapscores(pj, trimmed = True, alignmnet_method=None, trimming_method=None):
"""
    Return a dictionary with alignment names as keys and lists of gap scores as values,
    given the alignments or trimmed alignments dictionary.
    If a locus has more than one alignment in the dictionary, take the specified method
    or the first occurrence.
"""
gapscores = {}
aln_dict = None
if trimmed:
aln_dict = pj.trimmed_alignments
if aln_dict == {}:
raise IOError("No trimed alignments in the Project")
elif not trimmed:
aln_dict = pj.alignments
if aln_dict == {}:
raise IOError("No alignments in the Project")
for aln_name in aln_dict.keys():
char_type = None
char_type_list = [l.char_type for l in pj.loci if l.name == aln_name.split('@')[0]]
get = True
if len(char_type_list) == 0:
get = False
warnings.warn('Cannot find Locus for alignment %s. Is it a supermatrix? Skipping.'%aln_name)
elif len(char_type_list) > 1:
if (alignmnet_method and
not aln_name.split('@')[1] == alignmnet_method):
get = False
                warnings.warn('Skipping %s, taking only %s'%(aln_name, alignmnet_method))
elif (trimmed and trimming_method and
not aln_name.split('@')[2] == trimming_method):
get = False
                warnings.warn('Skipping %s, taking only %s'%(aln_name, trimming_method))
elif aln_name.split('@')[0] in [i.split('@')[0] for i in gapscores.keys()]:
exists = [i for i in gapscores.keys() if i.split('@')[0] == aln_name.split('@')[0]][0]
get = False
                warnings.warn('Skipping %s, already have %s'%(aln_name, exists))
if get:
char_type = char_type_list[0]
aln_obj = aln_dict[aln_name]
gapscores[aln_name] = gapscore(aln_obj)
return gapscores
def get_conservations(pj, trimmed = True, alignmnet_method=None, trimming_method=None):
"""
    Return a dictionary with alignment names as keys and lists of conservation scores as values,
    given the alignments or trimmed alignments dictionary.
    If a locus has more than one alignment in the dictionary, take the specified method
    or the first occurrence.
"""
conservations = {}
aln_dict = None
if trimmed:
aln_dict = pj.trimmed_alignments
if aln_dict == {}:
raise IOError("No trimed alignments in the Project")
elif not trimmed:
aln_dict = pj.alignments
if aln_dict == {}:
raise IOError("No alignments in the Project")
for aln_name in aln_dict.keys():
char_type = None
char_type_list = [l.char_type for l in pj.loci if l.name == aln_name.split('@')[0]]
get = True
if len(char_type_list) == 0:
get = False
warnings.warn('Cannot find Locus for alignment %s. Is it a supermatrix? Skipping.'%aln_name)
elif len(char_type_list) > 1:
if (alignmnet_method and
not aln_name.split('@')[1] == alignmnet_method):
get = False
                warnings.warn('Skipping %s, taking only %s'%(aln_name, alignmnet_method))
elif (trimmed and trimming_method and
not aln_name.split('@')[2] == trimming_method):
get = False
                warnings.warn('Skipping %s, taking only %s'%(aln_name, trimming_method))
elif aln_name.split('@')[0] in [i.split('@')[0] for i in conservations.keys()]:
exists = [i for i in conservations.keys() if i.split('@')[0] == aln_name.split('@')[0]][0]
get = False
                warnings.warn('Skipping %s, already have %s'%(aln_name, exists))
if get:
char_type = char_type_list[0]
aln_obj = aln_dict[aln_name]
conservations[aln_name] = conservation(aln_obj)
return conservations
def get_sequence_lengths(pj):
"""
    Return a dictionary with locus names as keys and lists of sequence lengths as values,
    given a Project instance.
    The lengths are calculated from the unaligned and untrimmed sequences.
"""
if len(pj.records) == 0:
raise IOError('No records in the Project')
lengths = {}
if len(pj.records_by_locus) == 0:
pj.extract_by_locus()
for locus in pj.records_by_locus:
lengths[locus] = []
for r in pj.records_by_locus[locus]:
lengths[locus].append(len(r.seq))
return lengths
def get_sequence_gcs(pj):
"""
Return a dictionary with locus names as keys and lists of sequence %GC as values
given a Project instance.
    The %GC values are calculated from the unaligned and untrimmed sequences.
"""
if len(pj.records) == 0:
raise IOError('No records in the Project')
gcs = {}
if len(pj.records_by_locus) == 0:
pj.extract_by_locus()
if any([l.char_type == 'prot' for l in pj.loci]):
warnings.warn('Protein loci GC content will be set to 0.0')
for locus in pj.records_by_locus:
char_type = [l for l in pj.loci if l.name == locus][0].char_type
if char_type == 'dna':
gcs[locus] = []
for r in pj.records_by_locus[locus]:
gcs[locus].append(GC(r.seq))
else:
gcs[locus] = [0.0, 0.0, 0.0, 0.0]
return gcs
def remove_border(axes=None, top=False, right=False, left=True, bottom=True):
"""
    Minimize chartjunk by stripping out unnecessary plot borders and axis ticks.
The top/right/left/bottom keywords toggle whether the corresponding plot border is drawn
"""
ax = axes or plt.gca()
ax.spines['top'].set_visible(top)
ax.spines['right'].set_visible(right)
ax.spines['left'].set_visible(left)
ax.spines['bottom'].set_visible(bottom)
#turn off all ticks
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticks_position('none')
#now re-enable visibles
if top:
ax.xaxis.tick_top()
if bottom:
ax.xaxis.tick_bottom()
if left:
ax.yaxis.tick_left()
if right:
ax.yaxis.tick_right()
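# Illustrative sketch (hypothetical data): strip the top/right spines and all tick marks
# from a plot, keeping only the left and bottom borders.
#
#   fig, ax = plt.subplots()
#   ax.plot(range(10))
#   remove_border(axes=ax)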
#########################################################################################
class LociStats:
#########################################################################################
def __init__(self, pj, trimmed=True, alignmnet_method=None, trimming_method=None):
"""
>>> from reprophylo import *
>>> pj = unpickle_pj('test-data/test_locistats', git=False)
>>> stats = LociStats(pj)
>>> entropies = stats.entropies['dummy@ReadDirectly@no_trim']
>>> stats.sort()
>>> np.percentile(entropies,25)
0.0
>>> np.percentile(entropies,50)
0.0
>>> conservation = stats.conservations['dummy1@ReadDirectly@no_trim']
>>> np.percentile(conservation,25)
0.0017470471212500001
"""
self.loci = pj.loci
self.entropies = get_entropies(pj,
trimmed = trimmed,
alignmnet_method=alignmnet_method,
trimming_method=trimming_method)
self.gapscores = get_gapscores(pj,
trimmed = trimmed,
alignmnet_method=alignmnet_method,
trimming_method=trimming_method)
self.conservations = get_conservations(pj,
trimmed = trimmed,
alignmnet_method=alignmnet_method,
trimming_method=trimming_method)
self.sequence_lengths = get_sequence_lengths(pj)
self.sequence_gcs = get_sequence_gcs(pj)
combined = []
for key in self.sequence_lengths:
locus_stats = [key]
entropies = [self.entropies[i] for i in self.entropies.keys() if i.split('@')[0] == key][0]
gapscores = [self.gapscores[i] for i in self.gapscores.keys() if i.split('@')[0] == key][0]
conservations = [self.conservations[i] for i in self.conservations.keys() if i.split('@')[0] == key][0]
combined.append([key,
self.sequence_lengths[key],
self.sequence_gcs[key],
entropies,
gapscores,
conservations])
self.loci_stats = combined
self.loci_stats_sorted = None
def sort(self, parameter = 'entropy', percentile=50, percentile_range=(25,75), reverse = True):
j = None
if parameter == 'entropy':
j = 3
elif parameter == 'gapscore':
j = 4
elif parameter == 'conservation':
j = 5
elif parameter == 'sequence_length':
j = 1
elif parameter == 'sequence_gc':
j = 2
self.loci_stats_sorted = sorted(self.loci_stats,
key=lambda i: (np.percentile(i[j], percentile),
abs(np.percentile(i[j], percentile_range[1])-
np.percentile(i[j], percentile_range[0]))),
reverse=reverse)
self.loci_stats_sorted = [list(i) for i in self.loci_stats_sorted]
def plot(self, filename, figsize=(30,10), params='all', lable_fsize=40, xtick_fsize=4, ytick_fsize=4,
boxcolor='salmon', whiskercolor='gray', capcolor='black', mediancolor='white', medianline_w=3):
parameter_indices = [3,4,5,1,2]
ytitles=['',
'Sequence Lengths',
'Sequence %GC',
'Entropy',
'Gap Score',
'Conservation Scores']
if not params=='all':
parameter_indices = []
for param in params:
if param == 'entropy':
parameter_indices.append(3)
elif param == 'gapscore':
parameter_indices.append(4)
elif param == 'conservation':
parameter_indices.append(5)
elif param == 'sequence_length':
parameter_indices.append(1)
elif param == 'sequence_gc':
parameter_indices.append(2)
if len(parameter_indices) == 0:
raise IOError('Must specify at least one parameter to plot')
fig, axes = plt.subplots(len(parameter_indices), sharex=True, figsize=figsize, dpi=80, frameon = False)
if len(parameter_indices) == 1:
axes = [axes]
labels = [k[0] for k in self.loci_stats_sorted]
j = 0
for ax in axes:
values = [k[parameter_indices[j]] for k in self.loci_stats_sorted]
bp = ax.boxplot(values,0,'', positions = range(4,(len(values)*4)+1, 4), patch_artist=True)
ax.set_ylabel(ytitles[parameter_indices[j]], fontsize=lable_fsize)
#plt.xlabel("Locus", fontsize=lable_fsize)
plt.xticks(range(4,((len(values)+1)*4),4), labels, rotation=90, fontsize = xtick_fsize)
plt.tight_layout()
j += 1
remove_border()
for box in bp['boxes']:
# change outline color
box.set( color=boxcolor, linewidth=1)
box.set_facecolor(boxcolor)
# change color, linestyle and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color=whiskercolor, linestyle='solid', linewidth=2.0)
# change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color=capcolor, linewidth=2.0)
# change color and linewidth of the medians
for median in bp['medians']:
median.set(color=mediancolor, linewidth=medianline_w)
fig.gca().set_ylim(bottom=-0.05)
fig.savefig(filename)
def slice_loci(self, median_range, otu_meta, parameter='entropy',
otu_must_have_all_of=[], otu_must_have_one_of='any'):
j = None
if parameter == 'entropy':
j = 3
elif parameter == 'gapscore':
j = 4
elif parameter == 'conservation':
j = 5
elif parameter == 'sequence_length':
j = 1
elif parameter == 'sequence_gc':
j = 2
loci_names = [i[0] for i in self.loci_stats_sorted if median_range[0] < np.median(i[j]) < median_range[1]]
loci = [l for l in self.loci if l.name in loci_names]
concat_name = "%s_%.2f_%.2f_loci_%s_to_%s"%(parameter, float(median_range[0]), float(median_range[1]),
loci_names[0],loci_names[-1])
return Concatenation(concat_name, loci,
otu_meta,
otu_must_have_all_of=otu_must_have_all_of,
otu_must_have_one_of=otu_must_have_one_of)
def slide_loci(self, otu_meta, median_range='all', parameter='entropy', start=0, length=2, step=1,
otu_must_have_all_of=[],
otu_must_have_one_of='any'):
j = None
if parameter == 'entropy':
j = 3
elif parameter == 'gapscore':
j = 4
elif parameter == 'conservation':
j = 5
elif parameter == 'sequence_length':
j = 1
elif parameter == 'sequence_gc':
j = 2
if median_range == 'all':
medians = [np.median(i) for i in [k[j] for k in self.loci_stats_sorted]]
median_range = [min(medians), max(medians)]
loci_in_range = [[i[0], np.median(i[j])] for i in self.loci_stats_sorted
if median_range[0] <= np.median(i[j]) <= median_range[1]]
concatenations = []
stop = False
while not stop:
window_loci = loci_in_range[start: start+length]
window_loci_names = [n[0] for n in window_loci]
loci = [l for l in self.loci if l.name in window_loci_names]
window_start_median = window_loci[0][1]
window_end_median = window_loci[-1][1]
concat_name = "%s_%.2f_%.2f_loci_%i_to_%i"%(parameter, float(window_start_median), float(window_end_median),
start, start+length-1)
print concat_name
concatenations.append(Concatenation(concat_name, loci,
otu_meta,
otu_must_have_all_of=otu_must_have_all_of,
otu_must_have_one_of=otu_must_have_one_of))
start = start+step
if len(loci_in_range[start:]) < length:
stop = True
return concatenations
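# Illustrative usage sketch (the pickle path and the OTU qualifier are assumptions): rank
# loci by per-column entropy, plot the distributions and build sliding-window
# Concatenations over the sorted loci.
#
#   pj = unpickle_pj('my_project.pkl', git=False)
#   stats = LociStats(pj)
#   stats.sort(parameter='entropy')
#   stats.plot('loci_stats.png')
#   concats = stats.slide_loci('source_organism', parameter='entropy', length=3, step=1)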
if __name__ == "__main__":
import doctest
doctest.testmod() | bsd-2-clause |
peastman/msmbuilder | msmbuilder/project_templates/0-test-install.py | 9 | 2531 | """This script tests your python installation as it pertains to running project templates.
MSMBuilder supports Python 2.7 and 3.3+ and has some necessary dependencies
like numpy, scipy, and scikit-learn. This templated project enforces
some more stringent requirements to make sure all the users are more-or-less
on the same page and to allow developers to exploit more helper libraries.
You can modify the template scripts to work for your particular set-up,
but it's probably easier to install `conda` and get the packages we
recommend.
{{header}}
"""
import textwrap
# Show intro text
paragraphs = __doc__.split('\n\n')
for p in paragraphs:
print(textwrap.fill(p))
print()
warnings = 0
## Test for python 3.5
import sys
if sys.version_info < (3, 5):
print(textwrap.fill(
"These scripts were all developed on Python 3.5, "
"which is the current, stable release of Python. "
"In particular, we use subprocess.run "
"(and probably some other new features). "
"You can easily modify the scripts to work on older versions "
"of Python, but why not just upgrade? We like Continuum's "
"Anaconda Python distribution for a simple install (without root)."
))
print()
warnings += 1
## Test for matplotlib
try:
import matplotlib as plt
except ImportError:
print(textwrap.fill(
"These scripts try to make some mildly intesting plots. "
"That requires `matplotlib`."
))
print()
warnings += 1
## Test for seaborn
try:
import seaborn as sns
except ImportError:
print(textwrap.fill(
"The default matplotlib styling is a little ugly. "
"By default, these scripts try to use `seaborn` to make prettier "
"plots. You can remove all the seaborn imports if you don't want "
"to install this library, but why not just install it? Try "
"`conda install seaborn`"
))
print()
warnings += 1
## Test for xdg-open
try:
import subprocess
subprocess.check_call(['xdg-open', '--version'])
except:
print(textwrap.fill(
"For convenience, the plotting scripts can try to use `xdg-open` "
"to pop up the result of the plot. Use the --display flag on "
"msmb TemplateProject to enable this behavior."
))
warnings += 1
## Report results
if warnings == 0:
print("I didn't find any problems with your installation! Good job.")
print()
else:
print("I found {} warnings, see above. Good luck!".format(warnings))
print()
| lgpl-2.1 |
drammock/mne-python | mne/utils/_testing.py | 4 | 17493 | # -*- coding: utf-8 -*-
"""Testing functions."""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from contextlib import contextmanager
from distutils.version import LooseVersion
from functools import partial, wraps
import os
import inspect
from io import StringIO
from shutil import rmtree
import sys
import tempfile
import traceback
from unittest import SkipTest
import warnings
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from ._logging import warn, ClosingStringIO
from .numerics import object_diff
def _explain_exception(start=-1, stop=None, prefix='> '):
"""Explain an exception."""
# start=-1 means "only the most recent caller"
etype, value, tb = sys.exc_info()
string = traceback.format_list(traceback.extract_tb(tb)[start:stop])
string = (''.join(string).split('\n') +
traceback.format_exception_only(etype, value))
string = ':\n' + prefix + ('\n' + prefix).join(string)
return string
class _TempDir(str):
"""Create and auto-destroy temp dir.
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level can not
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative could be using the atexit module instead).
"""
def __new__(self): # noqa: D105
new = str.__new__(self, tempfile.mkdtemp(prefix='tmp_mne_tempdir_'))
return new
def __init__(self): # noqa: D102
self._path = self.__str__()
def __del__(self): # noqa: D105
rmtree(self._path, ignore_errors=True)
def requires_nibabel():
"""Wrap to requires_module with a function call (fewer lines to change)."""
return partial(requires_module, name='nibabel')
def requires_dipy():
"""Check for dipy."""
import pytest
    # for some strange reason on CIs we can get a weird
    #
    #     ImportError: dlopen: cannot load any more object with static TLS
    #
# so let's import everything in the decorator.
try:
from dipy.align import imaffine, imwarp, metrics, transforms # noqa, analysis:ignore
from dipy.align.reslice import reslice # noqa, analysis:ignore
from dipy.align.imaffine import AffineMap # noqa, analysis:ignore
from dipy.align.imwarp import DiffeomorphicMap # noqa, analysis:ignore
except Exception:
have = False
else:
have = True
return pytest.mark.skipif(not have, reason='Requires dipy >= 0.10.1')
def requires_version(library, min_version='0.0'):
"""Check for a library version."""
import pytest
return pytest.mark.skipif(not check_version(library, min_version),
reason=('Requires %s version >= %s'
% (library, min_version)))
def requires_module(function, name, call=None):
"""Skip a test if package is not available (decorator)."""
import pytest
call = ('import %s' % name) if call is None else call
reason = 'Test %s skipped, requires %s.' % (function.__name__, name)
try:
exec(call) in globals(), locals()
except Exception as exc:
if len(str(exc)) > 0 and str(exc) != 'No module named %s' % name:
reason += ' Got exception (%s)' % (exc,)
skip = True
else:
skip = False
return pytest.mark.skipif(skip, reason=reason)(function)
_pandas_call = """
import pandas
version = LooseVersion(pandas.__version__)
if version < '0.8.0':
raise ImportError
"""
_mayavi_call = """
with warnings.catch_warnings(record=True): # traits
from mayavi import mlab
"""
_mne_call = """
if not has_mne_c():
raise ImportError
"""
_fs_call = """
if not has_freesurfer():
raise ImportError
"""
_n2ft_call = """
if 'NEUROMAG2FT_ROOT' not in os.environ:
raise ImportError
"""
requires_pandas = partial(requires_module, name='pandas', call=_pandas_call)
requires_pylsl = partial(requires_module, name='pylsl')
requires_sklearn = partial(requires_module, name='sklearn')
requires_mayavi = partial(requires_module, name='mayavi', call=_mayavi_call)
requires_mne = partial(requires_module, name='MNE-C', call=_mne_call)
def requires_freesurfer(arg):
"""Require Freesurfer."""
if isinstance(arg, str):
# Calling as @requires_freesurfer('progname'): return decorator
# after checking for progname existence
call = """
from . import run_subprocess
run_subprocess([%r, '--version'])
""" % (arg,)
return partial(
requires_module, name='Freesurfer (%s)' % (arg,), call=call)
else:
# Calling directly as @requires_freesurfer: return decorated function
# and just check env var existence
return requires_module(arg, name='Freesurfer', call=_fs_call)
requires_neuromag2ft = partial(requires_module, name='neuromag2ft',
call=_n2ft_call)
requires_vtk = partial(requires_module, name='vtk')
requires_pysurfer = partial(requires_module, name='PySurfer',
call="""import warnings
with warnings.catch_warnings(record=True):
from surfer import Brain""")
requires_good_network = partial(
requires_module, name='good network connection',
call='if int(os.environ.get("MNE_SKIP_NETWORK_TESTS", 0)):\n'
' raise ImportError')
requires_nitime = partial(requires_module, name='nitime')
requires_h5py = partial(requires_module, name='h5py')
def requires_numpydoc(func):
"""Decorate tests that need numpydoc."""
return requires_version('numpydoc', '1.0')(func) # validate needs 1.0
def check_version(library, min_version):
r"""Check minimum library version required.
Parameters
----------
library : str
The library name to import. Must have a ``__version__`` property.
min_version : str
The minimum version string. Anything that matches
``'(\d+ | [a-z]+ | \.)'``. Can also be empty to skip version
check (just check for library presence).
Returns
-------
ok : bool
True if the library exists with at least the specified version.
"""
ok = True
try:
library = __import__(library)
except ImportError:
ok = False
else:
if min_version:
this_version = LooseVersion(
getattr(library, '__version__', '0.0').lstrip('v'))
if this_version < min_version:
ok = False
return ok
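# A minimal usage sketch (the library and version below are arbitrary examples):
#
#   if check_version('numpy', '1.16'):
#       pass  # safe to rely on numpy >= 1.16 behaviour here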
def _check_mayavi_version(min_version='4.3.0'):
"""Check mayavi version."""
if not check_version('mayavi', min_version):
raise RuntimeError("Need mayavi >= %s" % min_version)
def _import_mlab():
"""Quietly import mlab."""
with warnings.catch_warnings(record=True):
from mayavi import mlab
return mlab
@contextmanager
def traits_test_context():
"""Context to raise errors in trait handlers."""
from traits.api import push_exception_handler
push_exception_handler(reraise_exceptions=True)
try:
yield
finally:
push_exception_handler(reraise_exceptions=False)
def traits_test(test_func):
"""Raise errors in trait handlers (decorator)."""
@wraps(test_func)
def dec(*args, **kwargs):
with traits_test_context():
return test_func(*args, **kwargs)
return dec
def run_command_if_main():
"""Run a given command if it's __main__."""
local_vars = inspect.currentframe().f_back.f_locals
if local_vars.get('__name__', '') == '__main__':
local_vars['run']()
class ArgvSetter(object):
"""Temporarily set sys.argv."""
def __init__(self, args=(), disable_stdout=True,
disable_stderr=True): # noqa: D102
self.argv = list(('python',) + args)
self.stdout = ClosingStringIO() if disable_stdout else sys.stdout
self.stderr = ClosingStringIO() if disable_stderr else sys.stderr
def __enter__(self): # noqa: D105
self.orig_argv = sys.argv
sys.argv = self.argv
self.orig_stdout = sys.stdout
sys.stdout = self.stdout
self.orig_stderr = sys.stderr
sys.stderr = self.stderr
return self
def __exit__(self, *args): # noqa: D105
sys.argv = self.orig_argv
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
class SilenceStdout(object):
"""Silence stdout."""
def __init__(self, close=True):
self.close = close
def __enter__(self): # noqa: D105
self.stdout = sys.stdout
sys.stdout = StringIO()
return sys.stdout
def __exit__(self, *args): # noqa: D105
if self.close:
sys.stdout.close()
sys.stdout = self.stdout
def has_nibabel():
"""Determine if nibabel is installed.
Returns
-------
has : bool
True if the user has nibabel.
"""
try:
import nibabel # noqa
except ImportError:
return False
else:
return True
def has_mne_c():
"""Check for MNE-C."""
return 'MNE_ROOT' in os.environ
def has_freesurfer():
"""Check for Freesurfer."""
return 'FREESURFER_HOME' in os.environ
def buggy_mkl_svd(function):
"""Decorate tests that make calls to SVD and intermittently fail."""
@wraps(function)
def dec(*args, **kwargs):
try:
return function(*args, **kwargs)
except np.linalg.LinAlgError as exp:
if 'SVD did not converge' in str(exp):
msg = 'Intel MKL SVD convergence error detected, skipping test'
warn(msg)
raise SkipTest(msg)
raise
return dec
def assert_and_remove_boundary_annot(annotations, n=1):
"""Assert that there are boundary annotations and remove them."""
from ..io.base import BaseRaw
if isinstance(annotations, BaseRaw): # allow either input
annotations = annotations.annotations
for key in ('EDGE', 'BAD'):
idx = np.where(annotations.description == '%s boundary' % key)[0]
assert len(idx) == n
annotations.delete(idx)
def assert_object_equal(a, b):
"""Assert two objects are equal."""
d = object_diff(a, b)
assert d == '', d
def _raw_annot(meas_date, orig_time):
from .. import Annotations, create_info
from ..annotations import _handle_meas_date
from ..io import RawArray
info = create_info(ch_names=10, sfreq=10.)
raw = RawArray(data=np.empty((10, 10)), info=info, first_samp=10)
if meas_date is not None:
meas_date = _handle_meas_date(meas_date)
raw.info['meas_date'] = meas_date
raw.info._check_consistency()
annot = Annotations([.5], [.2], ['dummy'], orig_time)
raw.set_annotations(annotations=annot)
return raw
def _get_data(x, ch_idx):
"""Get the (n_ch, n_times) data array."""
from ..evoked import Evoked
from ..io import BaseRaw
if isinstance(x, BaseRaw):
return x[ch_idx][0]
elif isinstance(x, Evoked):
return x.data[ch_idx]
def _check_snr(actual, desired, picks, min_tol, med_tol, msg, kind='MEG'):
"""Check the SNR of a set of channels."""
actual_data = _get_data(actual, picks)
desired_data = _get_data(desired, picks)
bench_rms = np.sqrt(np.mean(desired_data * desired_data, axis=1))
error = actual_data - desired_data
error_rms = np.sqrt(np.mean(error * error, axis=1))
np.clip(error_rms, 1e-60, np.inf, out=error_rms) # avoid division by zero
snrs = bench_rms / error_rms
# min tol
snr = snrs.min()
bad_count = (snrs < min_tol).sum()
msg = ' (%s)' % msg if msg != '' else msg
assert bad_count == 0, ('SNR (worst %0.2f) < %0.2f for %s/%s '
'channels%s' % (snr, min_tol, bad_count,
len(picks), msg))
# median tol
snr = np.median(snrs)
assert snr >= med_tol, ('%s SNR median %0.2f < %0.2f%s'
% (kind, snr, med_tol, msg))
def assert_meg_snr(actual, desired, min_tol, med_tol=500., chpi_med_tol=500.,
msg=None):
"""Assert channel SNR of a certain level.
Mostly useful for operations like Maxwell filtering that modify
MEG channels while leaving EEG and others intact.
"""
from ..io.pick import pick_types
    picks = pick_types(actual.info, meg=True, exclude=[])
picks_desired = pick_types(desired.info, meg=True, exclude=[])
assert_array_equal(picks, picks_desired, err_msg='MEG pick mismatch')
chpis = pick_types(actual.info, meg=False, chpi=True, exclude=[])
chpis_desired = pick_types(desired.info, meg=False, chpi=True, exclude=[])
if chpi_med_tol is not None:
assert_array_equal(chpis, chpis_desired, err_msg='cHPI pick mismatch')
others = np.setdiff1d(np.arange(len(actual.ch_names)),
np.concatenate([picks, chpis]))
others_desired = np.setdiff1d(np.arange(len(desired.ch_names)),
np.concatenate([picks_desired,
chpis_desired]))
assert_array_equal(others, others_desired, err_msg='Other pick mismatch')
if len(others) > 0: # if non-MEG channels present
assert_allclose(_get_data(actual, others),
_get_data(desired, others), atol=1e-11, rtol=1e-5,
err_msg='non-MEG channel mismatch')
_check_snr(actual, desired, picks, min_tol, med_tol, msg, kind='MEG')
if chpi_med_tol is not None and len(chpis) > 0:
_check_snr(actual, desired, chpis, 0., chpi_med_tol, msg, kind='cHPI')
def assert_snr(actual, desired, tol):
"""Assert actual and desired arrays are within some SNR tolerance."""
from scipy import linalg
with np.errstate(divide='ignore'): # allow infinite
snr = (linalg.norm(desired, ord='fro') /
linalg.norm(desired - actual, ord='fro'))
assert snr >= tol, '%f < %f' % (snr, tol)
def assert_stcs_equal(stc1, stc2):
"""Check that two STC are equal."""
assert_allclose(stc1.times, stc2.times)
assert_allclose(stc1.data, stc2.data)
assert_array_equal(stc1.vertices[0], stc2.vertices[0])
assert_array_equal(stc1.vertices[1], stc2.vertices[1])
assert_allclose(stc1.tmin, stc2.tmin)
assert_allclose(stc1.tstep, stc2.tstep)
def _dig_sort_key(dig):
"""Sort dig keys."""
return (dig['kind'], dig['ident'])
def assert_dig_allclose(info_py, info_bin, limit=None):
"""Assert dig allclose."""
from ..bem import fit_sphere_to_headshape
from ..io.constants import FIFF
from ..io.meas_info import Info
from ..channels.montage import DigMontage
# test dig positions
dig_py, dig_bin = info_py, info_bin
if isinstance(dig_py, Info):
assert isinstance(dig_bin, Info)
dig_py, dig_bin = dig_py['dig'], dig_bin['dig']
else:
assert isinstance(dig_bin, DigMontage)
assert isinstance(dig_py, DigMontage)
dig_py, dig_bin = dig_py.dig, dig_bin.dig
info_py = info_bin = None
assert isinstance(dig_py, list)
assert isinstance(dig_bin, list)
dig_py = sorted(dig_py, key=_dig_sort_key)
dig_bin = sorted(dig_bin, key=_dig_sort_key)
assert len(dig_py) == len(dig_bin)
for ii, (d_py, d_bin) in enumerate(zip(dig_py[:limit], dig_bin[:limit])):
for key in ('ident', 'kind', 'coord_frame'):
assert d_py[key] == d_bin[key], key
assert_allclose(d_py['r'], d_bin['r'], rtol=1e-5, atol=1e-5,
err_msg='Failure on %s:\n%s\n%s'
% (ii, d_py['r'], d_bin['r']))
if any(d['kind'] == FIFF.FIFFV_POINT_EXTRA for d in dig_py) and \
info_py is not None:
r_bin, o_head_bin, o_dev_bin = fit_sphere_to_headshape(
info_bin, units='m', verbose='error')
r_py, o_head_py, o_dev_py = fit_sphere_to_headshape(
info_py, units='m', verbose='error')
assert_allclose(r_py, r_bin, atol=1e-6)
assert_allclose(o_dev_py, o_dev_bin, rtol=1e-5, atol=1e-6)
assert_allclose(o_head_py, o_head_bin, rtol=1e-5, atol=1e-6)
@contextmanager
def modified_env(**d):
"""Use a modified os.environ with temporarily replaced key/value pairs.
Parameters
----------
**kwargs : dict
The key/value pairs of environment variables to replace.
"""
orig_env = dict()
for key, val in d.items():
orig_env[key] = os.getenv(key)
if val is not None:
assert isinstance(val, str)
os.environ[key] = val
elif key in os.environ:
del os.environ[key]
try:
yield
finally:
for key, val in orig_env.items():
if val is not None:
os.environ[key] = val
elif key in os.environ:
del os.environ[key]
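# Illustrative sketch (not used elsewhere): how modified_env might be used in
# a test. MNE_SKIP_NETWORK_TESTS appears above in this module; SOME_UNSET_VAR
# is a hypothetical name chosen only to show the removal behaviour.
def _example_modified_env_usage():
    """Illustrative sketch of temporarily overriding environment variables."""
    with modified_env(MNE_SKIP_NETWORK_TESTS='1', SOME_UNSET_VAR=None):
        # inside the block the overrides are visible ...
        assert os.environ['MNE_SKIP_NETWORK_TESTS'] == '1'
        assert 'SOME_UNSET_VAR' not in os.environ
    # ... and the previous values are restored on exit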
def _click_ch_name(fig, ch_index=0, button=1):
"""Click on a channel name in a raw/epochs/ICA browse-style plot."""
from ..viz.utils import _fake_click
fig.canvas.draw()
text = fig.mne.ax_main.get_yticklabels()[ch_index]
bbox = text.get_window_extent()
x = bbox.intervalx.mean()
y = bbox.intervaly.mean()
_fake_click(fig, fig.mne.ax_main, (x, y), xform='pix',
button=button)
| bsd-3-clause |
subutai/htmresearch | projects/union_path_integration/plot_rhombus_narrowing.py | 4 | 6852 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Plot location module representations during narrowing."""
import argparse
import base64
import cStringIO
import json
import math
import os
import xml.etree.ElementTree as ET
import matplotlib.cm
import numpy as np
import PIL.Image
from htmresearch.algorithms.location_modules import (
ThresholdedGaussian2DLocationModule)
CWD = os.path.dirname(os.path.realpath(__file__))
CHART_DIR = os.path.join(CWD, "charts")
def insertDiscreteModules(parent, activeCells, locationModuleWidth, rhombusBase,
rhombusHeight, stroke="lightgray"):
cellPhasesAxis = np.linspace(0., 1., locationModuleWidth, endpoint=False)
cellPhases = np.array([
np.repeat(cellPhasesAxis, locationModuleWidth),
np.tile(cellPhasesAxis, locationModuleWidth)]) + (0.5/locationModuleWidth)
r = (0.5 * rhombusBase / locationModuleWidth)
# Inserting a 'g' isn't necessary, but it makes this image more organized when
# working with Illustrator layers.
g = ET.SubElement(parent, "g")
for cell, (phi1, phi2) in enumerate(cellPhases.T):
circle = ET.SubElement(g, "circle")
circle.set("cx", str(rhombusBase*(phi1 + phi2*np.cos(np.radians(60.)))))
circle.set("cy", str(rhombusHeight*(1. - phi2)))
circle.set("r", str(r))
circle.set("stroke", stroke)
circle.set("stroke-width", "1")
if cell in activeCells:
circle.set("fill", "black")
else:
circle.set("fill", "none")
def insertPointExcitations(parent, bumps, rhombusBase, rhombusHeight,
bumpSigma, opacity=0.6, enhancementFactor=4.0,
bumpOverlapMethod="probabilistic"):
imgWidth = rhombusBase * 1.5
imgHeight = rhombusBase * np.sin(np.radians(60.))
numCols = int(rhombusBase * enhancementFactor)
numRows = int(rhombusBase * np.sin(np.radians(60.)) * enhancementFactor)
numBitmapRows = int(numRows)
numBitmapCols = int(numCols * 1.5)
# Look up the rows in bitmap order.
queryPhases = np.array(
[np.tile(np.linspace(0., 1., numCols, endpoint=False), numRows),
np.repeat(np.linspace(0., 1., numRows, endpoint=False)[::-1],
numCols)]) + [[0.5 / numCols],
[0.5 / numRows]]
excitations = ThresholdedGaussian2DLocationModule.getCellExcitations(
queryPhases, bumps, bumpSigma, bumpOverlapMethod)
m = matplotlib.cm.get_cmap("rainbow")
coloredSquare = m(excitations)
coloredSquare[:,3] = opacity
bitmap = np.zeros((numBitmapRows, numBitmapCols, 4))
# Create a mapping from (row, columnInSquare) => columnInRhombus.
# These rows are in bitmap order, starting from the top.
columnOffsetByRow = np.floor(
np.linspace(0, numCols * math.cos(np.radians(60.)), numRows, endpoint=False)
)[::-1].astype("int")
columnInRhombus = columnOffsetByRow[:, np.newaxis] + np.arange(numCols)
bitmap[
(np.repeat(np.arange(numRows), numCols),
columnInRhombus.flatten())] = coloredSquare
png = PIL.Image.fromarray(((bitmap) * 255).astype("uint8"), mode="RGBA")
pngBuffer = cStringIO.StringIO()
png.save(pngBuffer, format="PNG")
pngStr = base64.b64encode(pngBuffer.getvalue())
image = ET.SubElement(parent, "image")
image.set("xlink:href", "data:image/png;base64,{}".format(pngStr))
image.set("width", str(imgWidth))
image.set("height", str(imgHeight))
def rhombusChart(inFilename, outFilename, objectNumber, moduleNumbers, numSteps,
rhombusBase=47, betweenX=4, betweenY=6):
rhombusHeight = rhombusBase * np.sin(np.radians(60.))
with open(inFilename, "r") as f:
experiments = json.load(f)
exp = next(exp for exp in experiments
if exp[0]["numObjects"] == 50)
locationModuleWidth = exp[0]["locationModuleWidth"]
bumpSigma = exp[1]["bumpSigma"]
locationLayerTimeline = exp[1]["locationLayerTimelineByObject"][str(objectNumber)]
if numSteps is not None:
locationLayerTimeline = locationLayerTimeline[:numSteps]
# The final rhombus sticks out an additional 0.5 widths, hence the 0.5.
  width = (rhombusBase*(len(locationLayerTimeline) + 0.5) +
           betweenX*(len(locationLayerTimeline) - 1))
height = (rhombusHeight*len(moduleNumbers) +
betweenY*(len(moduleNumbers) - 1))
svg = ET.Element("svg")
svg.set("width", str(width + 20))
svg.set("height", str(height + 20))
svg.set("xmlns", "http://www.w3.org/2000/svg")
svg.set("xmlns:xlink", "http://www.w3.org/1999/xlink")
container = ET.SubElement(svg, "g")
container.set("transform", "translate(10,10)")
for t, moduleStates in enumerate(locationLayerTimeline):
for iModule, moduleNumber in enumerate(moduleNumbers):
moduleState = moduleStates[moduleNumber]
rhombus = ET.SubElement(container, "g")
rhombus.set("transform", "translate({},{})".format(
t*(rhombusBase + betweenX), iModule*(rhombusHeight + betweenY)))
insertDiscreteModules(rhombus, moduleState["activeCells"],
locationModuleWidth, rhombusBase, rhombusHeight)
insertPointExcitations(rhombus, np.array(moduleState["bumps"]).T,
rhombusBase, rhombusHeight, bumpSigma)
filename = os.path.join(CHART_DIR, outFilename)
with open(filename, "w") as f:
print "Saving", filename
ET.ElementTree(svg).write(f, encoding="utf-8", xml_declaration=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--inFile", type=str, required=True)
parser.add_argument("--outFile", type=str, required=True)
parser.add_argument("--objectNumber", type=int, default=47)
parser.add_argument("--moduleNumbers", type=int, nargs="+",
default=range(0,2,9))
parser.add_argument("--numSteps", type=int, default=None)
args = parser.parse_args()
rhombusChart(args.inFile, args.outFile, args.objectNumber, args.moduleNumbers,
args.numSteps)
| agpl-3.0 |
techn0mad/selcald | selcald/analyze.py | 1 | 7349 | # Run with "ipython -i --matplotlib=qt analyze.py <file>.wav"
#
from __future__ import print_function
import sys
import numpy as np
# import pandas as pd
from scipy import signal
import matplotlib.pyplot as plt
from scipy.io.wavfile import read
from scipy.signal import butter, lfilter
from math import log10
FRAME = 0.1 # Frame time in seconds
Alpha = 312.6
Bravo = 346.7
Charlie = 384.6
Delta = 426.6
Echo = 473.2
Foxtrot = 524.8
Golf = 582.1
Hotel = 645.7
Juliette = 716.1
Kilo = 794.3
Lima = 881.0
Mike = 977.2
Papa = 1083.9
Quebec = 1202.3
Romeo = 1333.5
Sierra = 1479.1
FLT_LEN = 2000 # Samples
# Shamelessly lifted from
# https://scipy.github.io/old-wiki/pages/Cookbook/ButterworthBandpass
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
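# A minimal sketch (not called by analyze()): band-limit a noisy test tone to
# the SELCAL voice band. The 11025 Hz rate and one-second length are example
# assumptions, not requirements of the functions above.
def _example_bandpass_usage(rate=11025):
    t = np.linspace(0, 1.0, rate, endpoint=False)
    noisy = np.sin(2 * np.pi * 500.0 * t) + np.random.randn(rate)
    return butter_bandpass_filter(noisy, 270, 1700, rate)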
# tone synthesis
def note(freq, cycles, amp=32767.0, rate=44100):
    length = cycles * (1.0 / rate)  # 'cycles' is effectively a sample count
    t = np.linspace(0, length, int(length * rate))
    if freq == 0:
        data = np.zeros(int(length * rate))
else:
data = np.sin(2 * np.pi * freq * t) * amp
return data.astype(int)
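# Illustrative sketch (nothing in this module calls it): build the two
# simultaneous tones of a hypothetical "AB" SELCAL code the same way analyze()
# builds its single-tone correlation templates.
def _example_note_usage(rate=11025):
    tone_a = note(Alpha, FLT_LEN, rate=rate)
    tone_b = note(Bravo, FLT_LEN, rate=rate)
    return tone_a + tone_b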
# analyze wav file
def analyze(file_name):
try:
sig_rate, sig_noise = read(file_name)
except Exception:
print('Error opening {}'.format(file_name))
return
print('file: ', file_name, ' rate: ', sig_rate, ' len: ', len(sig_noise))
if sig_rate == 44100:
decimate = 4 # rate = 11025, Fmax = 5512.5 Hz
elif sig_rate == 48000:
decimate = 5 # rate = 9600, Fmax = 4800 Hz
elif sig_rate == 22050:
decimate = 2 # rate = 11025, Fmax = 5512.5 Hz
elif sig_rate == 11025:
decimate = 1 # rate = 11025, Fmax = 5512.5 Hz
else:
print('Sample rate {} not supported.'.format(sig_rate))
return
if decimate > 1:
sig_noise = signal.decimate(sig_noise, decimate)
sig_rate = sig_rate / decimate
print('length after decimation: ', len(sig_noise))
sig_noise = butter_bandpass_filter(sig_noise, 270, 1700, sig_rate, order=8)
sigA = note(Alpha, FLT_LEN, rate=sig_rate)
sigB = note(Bravo, FLT_LEN, rate=sig_rate)
sigC = note(Charlie, FLT_LEN, rate=sig_rate)
sigD = note(Delta, FLT_LEN, rate=sig_rate)
sigE = note(Echo, FLT_LEN, rate=sig_rate)
sigF = note(Foxtrot, FLT_LEN, rate=sig_rate)
sigG = note(Golf, FLT_LEN, rate=sig_rate)
sigH = note(Hotel, FLT_LEN, rate=sig_rate)
sigJ = note(Juliette, FLT_LEN, rate=sig_rate)
sigK = note(Kilo, FLT_LEN, rate=sig_rate)
sigL = note(Lima, FLT_LEN, rate=sig_rate)
sigM = note(Mike, FLT_LEN, rate=sig_rate)
sigP = note(Papa, FLT_LEN, rate=sig_rate)
sigQ = note(Quebec, FLT_LEN, rate=sig_rate)
sigR = note(Romeo, FLT_LEN, rate=sig_rate)
sigS = note(Sierra, FLT_LEN, rate=sig_rate)
corrA = np.abs(signal.correlate(sig_noise, sigA, mode='same'))
print('A: {}'.format(log10(corrA.sum())))
corrB = np.abs(signal.correlate(sig_noise, sigB, mode='same'))
print('B: {}'.format(log10(corrB.sum())))
corrC = np.abs(signal.correlate(sig_noise, sigC, mode='same'))
print('C: {}'.format(log10(corrC.sum())))
corrD = np.abs(signal.correlate(sig_noise, sigD, mode='same'))
print('D: {}'.format(log10(corrD.sum())))
corrE = np.abs(signal.correlate(sig_noise, sigE, mode='same'))
print('E: {}'.format(log10(corrE.sum())))
corrF = np.abs(signal.correlate(sig_noise, sigF, mode='same'))
print('F: {}'.format(log10(corrF.sum())))
corrG = np.abs(signal.correlate(sig_noise, sigG, mode='same'))
print('G: {}'.format(log10(corrG.sum())))
corrH = np.abs(signal.correlate(sig_noise, sigH, mode='same'))
print('H: {}'.format(log10(corrH.sum())))
corrJ = np.abs(signal.correlate(sig_noise, sigJ, mode='same'))
print('J: {}'.format(log10(corrJ.sum())))
corrK = np.abs(signal.correlate(sig_noise, sigK, mode='same'))
print('K: {}'.format(log10(corrK.sum())))
corrL = np.abs(signal.correlate(sig_noise, sigL, mode='same'))
print('L: {}'.format(log10(corrL.sum())))
corrM = np.abs(signal.correlate(sig_noise, sigM, mode='same'))
print('M: {}'.format(log10(corrM.sum())))
corrP = np.abs(signal.correlate(sig_noise, sigP, mode='same'))
print('P: {}'.format(log10(corrP.sum())))
corrQ = np.abs(signal.correlate(sig_noise, sigQ, mode='same'))
print('Q: {}'.format(log10(corrQ.sum())))
corrR = np.abs(signal.correlate(sig_noise, sigR, mode='same'))
print('R: {}'.format(log10(corrR.sum())))
corrS = np.abs(signal.correlate(sig_noise, sigS, mode='same'))
print('S: {}'.format(log10(corrS.sum())))
fig, (ax_A, ax_B, ax_C, ax_D, ax_E, ax_F, ax_G, ax_H, ax_J, ax_K,
ax_L, ax_M, ax_P, ax_Q, ax_R, ax_S) = plt.subplots(16, 1,
sharex=True,
sharey=True)
# ax_sig.plot(sig_noise)
# ax_sig.set_title('Signal with noise')
# ax_sig.axis('off')
# ax_sig.margins(0, 0.1)
ax_A.plot(corrA)
ax_A.axhline(np.average(corrA), ls=':')
# ax_A.set_title(label='Alpha')
ax_A.axis('off')
ax_B.plot(corrB)
ax_B.axhline(np.average(corrB), ls=':')
# ax_B.set_title(label='Bravo')
ax_B.axis('off')
ax_C.plot(corrC)
ax_C.axhline(np.average(corrC), ls=':')
# ax_C.set_title(label='Charlie')
ax_C.axis('off')
ax_D.plot(corrD)
ax_D.axhline(np.average(corrD), ls=':')
# ax_D.set_title(label='Delta')
ax_D.axis('off')
ax_E.plot(corrE)
ax_E.axhline(np.average(corrE), ls=':')
# ax_E.set_title(label='Echo')
ax_E.axis('off')
ax_F.plot(corrF)
ax_F.axhline(np.average(corrF), ls=':')
# ax_F.set_title(label='Foxtrot')
ax_F.axis('off')
ax_G.plot(corrG)
ax_G.axhline(np.average(corrG), ls=':')
# ax_G.set_title(label='Golf')
ax_G.axis('off')
ax_H.plot(corrH)
ax_H.axhline(np.average(corrH), ls=':')
# ax_H.set_title(label='Hotel')
ax_H.axis('off')
ax_J.plot(corrJ)
ax_J.axhline(np.average(corrJ), ls=':')
# ax_J.set_title(label='Juliette')
ax_J.axis('off')
ax_K.plot(corrK)
ax_K.axhline(np.average(corrK), ls=':')
# ax_K.set_title(label='Kilo')
ax_K.axis('off')
ax_L.plot(corrL)
ax_L.axhline(np.average(corrL), ls=':')
# ax_L.set_title(label='Lima')
ax_L.axis('off')
ax_M.plot(corrM)
ax_M.axhline(np.average(corrM), ls=':')
# ax_M.set_title(label='Mike')
ax_M.axis('off')
ax_P.plot(corrP)
ax_P.axhline(np.average(corrP), ls=':')
# ax_P.set_title(label='Papa')
ax_P.axis('off')
ax_Q.plot(corrQ)
ax_Q.axhline(np.average(corrQ), ls=':')
# ax_Q.set_title(label='Quebec')
ax_Q.axis('off')
ax_R.plot(corrR)
ax_R.axhline(np.average(corrR), ls=':')
# ax_R.set_title(label='Romeo')
ax_R.axis('off')
ax_S.plot(corrS)
ax_S.axhline(np.average(corrS), ls=':')
# ax_S.set_title(label='Sierra')
ax_S.axis('off')
fig.set_tight_layout(True)
fig.show()
if __name__ == "__main__":
analyze(sys.argv[1])
| gpl-2.0 |
WFRT/Comps | graphics/files/Plot.py | 1 | 15918 | import matplotlib.pyplot as mpl
import numpy as np
from matplotlib.dates import *
import os
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter, DayLocator, HourLocator, WeekdayLocator
class Plot:
def __init__(self, file):
self.file = file
self._setDefaults()
self.showSkill = False
self.dst = False
def setDst(self, flag):
self.dst = flag
def _setDefaults(self):
self.ms = 4.0 # Marker size
self.lw = 1
self.green = [0,1,0]
self.blue = [0,0,1]
self.red = [1,0,0]
self.imgRes = 100
self.showX = 1
self.showTitle = 1
self.fs = 10
self.labelFs = 10
self.showGrid = 1
self.minOffset = np.nan;
self.maxOffset = np.nan;
def setOffsets(self, offsets):
self.offsets = offsets
def plot(self, ax):
self.plotCore(ax)
if(self.showGrid):
mpl.grid('on')
else:
mpl.grid('off')
def plotCore(self, ax):
assert False, "Not implemented"
def labelAxes(self):
mpl.xlabel("Time (PDT)", fontsize=self.labelFs, position=[0.5,0.1])
def disableX(self):
self.showX = 0
def disableTitle(self):
self.showTitle = 0;
# When set to true, will colour the ensemble dots based on how skillful they are predicted to be.
# Useful for analogs
def setShowSkill(self, b):
self.showSkill = b;
def setFontSize(self, fs):
self.fs = fs
# Fill an area along x, between yLower and yUpper
   # Both yLower and yUpper must correspond to points in x (i.e. be in the same order)
def _fill(self, x, yLower, yUpper, col, alpha=1, zorder=0):
# This approach doesn't work, because it doesn't remove points with missing x or y
#X = np.hstack((x, x[::-1]))
#Y = np.hstack((yLower, yUpper[::-1]))
# Populate a list of non-missing points
X = list()
Y = list()
for i in range(0,len(x)):
if(not( np.isnan(x[i]) or np.isnan(yLower[i]))):
X.append(x[i])
Y.append(yLower[i])
for i in range(len(x)-1, -1, -1):
if(not (np.isnan(x[i]) or np.isnan(yUpper[i]))):
X.append(x[i])
Y.append(yUpper[i])
mpl.fill(X, Y, facecolor=col, alpha=alpha,linewidth=0, zorder=zorder)
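   # Illustrative only (p10/p90 are hypothetical percentile series): a call like
   #    self._fill(x, p10, p90, [0.8, 0.8, 1], alpha=0.5, zorder=-10)
   # shades the band between the two curves while skipping missing values,
   # which is how CdfPlot below shades between adjacent percentile lines.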
# Generic (abstract) plot with time as x-axis
class TimePlot(Plot):
def __init__(self, file):
Plot.__init__(self, file)
self.shortRange = True
def setShortRange(self, flag):
self.shortRange = flag
def _xAxis(self, ax):
# X-axis labels
# Don't create ticks when the x-axis range is too big. Likely this is because of
# a problem with the input data. Some versions of python crash when trying to
# create too many ticks
range = mpl.xlim()
if(range[1] - range[0] < 100):
if(self.shortRange):
mpl.gca().xaxis.set_major_locator(DayLocator(interval=1))
mpl.gca().xaxis.set_minor_locator(HourLocator(interval=6))
mpl.gca().xaxis.set_major_formatter(DateFormatter('\n %a %d %b %Y'))
mpl.gca().xaxis.set_minor_formatter(DateFormatter('%H'))
else:
mpl.gca().xaxis.set_major_locator(WeekdayLocator(byweekday=(MO,TU,WE,TH,FR)))
mpl.gca().xaxis.set_major_formatter(DateFormatter('\n%Y-%m-%d'))
mpl.gca().xaxis.set_minor_locator(WeekdayLocator(byweekday=(SA,SU)))
mpl.gca().xaxis.set_minor_formatter(DateFormatter('\n%Y-%m-%d'))
mpl.xticks(rotation=90)
if(self.showX):
mpl.xlabel('Date', fontsize=self.labelFs)
majlabels = [tick.label1 for tick in mpl.gca().xaxis.get_major_ticks()]
for i in majlabels:
# Don't show the last label, since it will be outside the range
if(i == majlabels[len(majlabels)-1]):
i.set_visible(0)
if(not self.showX):
i.set_visible(0);
else:
if(self.shortRange):
i.set_horizontalalignment('left')
i.set_position((0,-0.035))
else:
i.set_horizontalalignment('right')
i.set_rotation(30);
i.set_verticalalignment('top')
i.set_fontsize(self.fs)
i.set_position((0,-0.035))
minlabels = [tick.label1 for tick in mpl.gca().xaxis.get_minor_ticks()]
for i in minlabels:
if(not self.showX):
i.set_visible(0);
else:
if(self.shortRange):
i.set_horizontalalignment('center')
i.set_rotation(0);
i.set_color("k")
else:
i.set_horizontalalignment('right')
i.set_rotation(30);
i.set_color((1,0,1)) # Weekend days are magenta
i.set_verticalalignment('top')
i.set_fontsize(self.fs)
ylabels = [tick.label1 for tick in mpl.gca().yaxis.get_major_ticks()]
for i in ylabels:
i.set_fontsize(self.fs)
# Gridlines
mpl.gca().xaxis.grid(True, which='major', color='k', zorder=-10, linestyle='-')
if(self.shortRange):
mpl.gca().xaxis.grid(True, which='minor', color='k', zorder=0, linestyle=':')
else:
mpl.gca().xaxis.grid(True, which='minor', color=(1,0,1), zorder=0, linestyle='-')
minOffset = min(self.file.getOffsets())
maxOffset = max(self.file.getOffsets())
if(not np.isnan(self.maxOffset)):
maxOffset = minOffset + self.maxOffset/24.0
mpl.xlim(minOffset, maxOffset)
def setOffsetRange(self, min, max):
self.minOffset = min;
self.maxOffset = max;
def getMarkerSize(self, i):
if(not self.showSkill):
mss = 6
elif(i == 0):
mss = self.ms
elif(i == 1):
mss = self.ms*28/36
elif(i == 2):
mss = self.ms*16/36
else:
mss = self.ms*10/36
return mss
def getMarkerColor(self, i):
if(not self.showSkill):
col = 'k'
elif(i == 0):
col = [0,0,0.6]
elif(i == 1):
col = [0.3,0.3,1]
elif(i == 2):
col = [0.7,0.7,1]
else:
col = self.red
return col
def getMarkerEdgeColor(self, i):
if(not self.showSkill):
mec = 'w'
elif(i < 3):
mec = 'k'
else:
mec = self.getMarkerColor(i)
return mec
def getMarkerStyle(self, i):
if(not self.showSkill):
mstyle = '.'
else:
mstyle = 'o'
return mstyle
###########################
# Single click meteograms #
###########################
class MeteoPlot(TimePlot):
def __init__(self, file):
Plot.__init__(self, file)
# Default colours (not used, always overridden)
self.col = [0,0,0]
self.shading = [0.1,0.1,0.1]
# Opaqueness of the shading
self.alpha = 0.3
# Set the size and position of the axis in the figure
mpl.gca().get_axes().set_position([0.1, 0.2, 0.87, 0.75])
self.setStyle("ubc")
   # Set the style of the plots: either "ubc" (dashed minor gridlines) or
   # "cmbc" (solid minor gridlines)
def setStyle(self, style):
if(not (style == "ubc" or style == "cmbc")):
         raise ValueError("Unknown style '%s': expected 'ubc' or 'cmbc'" % style)
self.style = style
if(style == "ubc"):
self.gridStyle = '--';
elif(style == "cmbc"):
self.gridStyle = '-';
# Set colour of lines and shadings for current plot
def setCol(self, col):
self.col = col;
shade = 0.6
# Shading should be: [1 shade shade] or [shade shade 1]
self.shading = [(col[0]>0)*(1-shade)+shade, (col[1]>0)*(1-shade)+shade, (col[2]>0)*(1-shade)+shade]
def plotCore(self,ax):
ens = self.file.getEnsemble()
obs = self.file.getObs()
dets = self.file.getDeterministic()
# Plots mean
mpl.plot(ens['offsets'], dets['values'], '-', color=self.col);
# Plot shading
self._fill(ens['offsets'], ens['values'][:,0], ens['values'][:,2], self.shading,
self.alpha, zorder=-20)
# Plot obs
mpl.plot(obs['offsets'], obs['values'],'.', color=self.col);
var = self.file.getVariable()
mpl.ylabel(var['name'] + " (" + var['units'] + ")", fontsize=self.labelFs)
self._xAxis(ax)
self._yAxis(ax)
def _yAxis(self, ax):
if(self.style == "cmbc"):
#mpl.gca().yaxis.set_major_locator(MultipleLocator(2))
[y_start, y_end] = mpl.ylim();
'''
y_start = min(ylims[:,0]);
y_end = max(ylims[:,1]);
# Determine ylimits
if(y_start == -999):
y_start = -20
else:
y_start = np.floor(y_start/2)*2
if(y_end == -999):
y_end = 10
else:
y_end = np.ceil(y_end/2)*2
# Always show at least down to -5
if(y_start > -5):
y_start = -5;
mpl.ylim([y_start, y_end]);
'''
[y_start, y_end] = mpl.ylim();
# Format x-axis
def _xAxis(self, ax):
# Set range
mpl.xlim(np.floor(min(self.file.getOffsets())), np.floor(max(self.file.getOffsets())))
# X-axis labels
mpl.gca().xaxis.set_major_locator(DayLocator(interval=1))
mpl.gca().xaxis.set_minor_locator(HourLocator(interval=6))
mpl.gca().xaxis.set_major_formatter(DateFormatter('\n %a %d %b %Y'))
mpl.gca().xaxis.set_minor_formatter(DateFormatter('%H'))
# Hour labels
minlabels = [tick.label1 for tick in mpl.gca().xaxis.get_minor_ticks()]
for i in minlabels:
i.set_fontsize(12)
# Date labels
majlabels = [tick.label1 for tick in mpl.gca().xaxis.get_major_ticks()]
counter = 0
numLabels = 4;
for i in majlabels:
if(counter < numLabels):
i.set_horizontalalignment('left')
i.set_verticalalignment('top')
i.set_fontsize(12)
i.set_position((0,-0.035)) # Moves major labels to the top of the graph
# The x-coordinate seems to be irrelevant. When y-coord
# is 1, the label is near the top. For 1.1 it is above the graph
else:
i.set_visible(0) # Turn off the last date label, since it is outside the graph
counter = counter + 1
mpl.gca().xaxis.grid(True, which='major', color='k', zorder=-10, linestyle='-', linewidth=2)
mpl.gca().xaxis.grid(True, which='minor', color='k', zorder=0, linestyle=self.gridStyle)
mpl.gca().yaxis.grid(True, which='major', color='k', zorder=0)
if(self.dst):
tzLabel = "PDT"
else:
tzLabel = "PST"
mpl.xlabel("Past Time (" + tzLabel + ") Future", fontsize=15, position=[0.5, 0.1])
mpl.gcf().set_size_inches(12,4)
class CdfPlot(TimePlot):
def __init__(self, file):
TimePlot.__init__(self, file)
self._showEns = True
self._showProb = True
self._showObs = True
def setShowEns(self, flag):
self._showEns = flag
def setShowProb(self, flag):
self._showProb = flag
def setShowObs(self, flag):
self._showObs = flag
def plotCore(self, ax):
ens = self.file.getEnsemble()
self._plotObs(ax)
self._plotDeterministic(ax)
if(self._showEns):
self._plotEnsemble(ax)
if(self._showProb):
self._plotProb(ax)
var = self.file.getVariable()
mpl.ylabel(var['name'] + " (" + var['units'] + ")", fontsize=self.labelFs)
self._xAxis(ax)
if(self.showTitle):
loc = self.file.getLocation()
mpl.title('Meteogram for ' + "%d %2.2f %2.2f" % (loc['id'],loc['lat'], loc['lon']), fontsize=self.fs);
def _plotObs(self, ax):
if(self._showObs):
obs = self.file.getObs()
mpl.plot(obs['offsets'], obs['values'], 'o-', mfc='w', mew=2, color=self.red,
mec=self.red, ms=self.ms*3/4, lw=self.lw, label="Obs", zorder=5)
# Draw one dot for each ensemble member
def _plotEnsemble(self, ax):
ens = self.file.getEnsemble()
nMembers = ens['values'].shape[1]
for i in range(0,nMembers):
col = self.getMarkerColor(i)
mss = self.getMarkerSize(i)
mec = self.getMarkerEdgeColor(i)
mstyle = self.getMarkerStyle(i)
if(i == 0):
mpl.plot(ens['offsets'], ens['values'][:,i], mstyle, mec=mec, ms=mss, mfc=col,
label="Ens members");
else:
mpl.plot(ens['offsets'], ens['values'][:,i], mstyle, mec=mec, ms=mss, mfc=col);
def plotMember(self, ax, member, col, name=""):
ens = self.file.getEnsemble()
mss = 5
mec = 'k'
mpl.plot(ens['offsets'], ens['values'][:,member], '-s', lw=2, mfc=col, color=col, mec=mec, ms=mss, label=name);
# Plots CDF lines
def _plotProb(self, ax):
cdf = self.file.getCdfs()
nLines = cdf['values'].shape[1]
for i in range(nLines-1,-1,-1):
if(i < (nLines-1)/2.0):
var = 1-float(i)/((nLines-1)/2);
ec = [0,0,var]; # Edgecolour
faceCol = var
else:
var = (i - (nLines-1)/2.0)/(float(nLines-1)/2+1)
ec = [var,0,0];
faceCol = (i+1- (nLines-1)/2.0)/(float(nLines-1)/2)
if(i == (nLines-1)/2.0):
ec = [0,1,0]
col = [faceCol,faceCol,faceCol];
if(i == 0 or i == nLines-1):
mstyle = '--'
else:
mstyle = '-'
lbl = "%d" % (round(cdf['cdfs'][i]*100.0)) + "%"
mpl.plot(cdf['offsets'], cdf['values'][:,i], mstyle, color=ec, lw=self.lw,
label=lbl, zorder=-10);
if(i < nLines-1):
# Only plot if not all values are missing
if(sum(np.isnan(cdf['values'][:,i])) < len(cdf['values'][:,0])):
self._fill(cdf['offsets'], cdf['values'][:,i], cdf['values'][:,i+1], col,
zorder=-20)
def _plotDeterministic(self, ax):
dets = self.file.getDeterministic()
mpl.plot(dets['offsets'], dets['values'], 'o-', mfc=[1,1,1], mew=2,
color=self.green, mec=self.green, ms=self.ms*3/4, lw=self.lw,
label="Deterministic");
class DiscretePlot(TimePlot):
def __init__(self, file):
Plot.__init__(self, file)
self.invertY = 0;
def setInvertY(self, flag):
self.invertY = flag
def plotCore(self, ax):
self._plotProb(ax)
var = self.file.getVariable()
if(var['name'] == "Precip24"):
ylab = "Prob of Precip (%)"
else:
ylab = "Probability (%)"
mpl.ylabel(ylab, fontsize=self.labelFs)
self._xAxis(ax)
mpl.ylim([0,100]);
if(self.showTitle):
mpl.title('Meteogram for ' + str(self.file.getLocation()['id']), fontsize=self.fs);
# Plots CDF lines
def _plotProb(self, ax):
p0 = self.file.getLowerDiscrete()
y = p0['values'][:]
if(self.invertY):
y = 1 - y;
mpl.plot(p0['offsets'], 100*y, 'k-', mew=2);
# Shows which dates were used to construct ensemble. Useful for analogs.
class DatesPlot(TimePlot):
def plotCore(self, ax):
dates = self.file.getDates()
nMembers = dates['values'].shape[1]
# Only draw if there are valid points.
# mpl crashes otherwise
if(dates['values'].size - np.isnan(dates['values']).sum() > 0):
mpl.gca().yaxis.set_major_locator(YearLocator())
mpl.gca().yaxis.set_major_formatter(DateFormatter('\n%b/%Y'))
mpl.gca().yaxis.set_minor_locator(MonthLocator(interval=1))
for i in range(0, nMembers):
col = self.getMarkerColor(i)
mss = self.getMarkerSize(i)
mec = self.getMarkerEdgeColor(i)
mstyle = self.getMarkerStyle(i)
mpl.plot(dates['offsets'], dates['values'][:,i], mstyle, mec=mec, ms=mss, mfc=col);
self._xAxis(ax)
mpl.title('Dates used to construct ensembles', fontsize=self.fs)
| bsd-3-clause |
alsgregory/FADE | fade/verification/mrh.py | 2 | 6423 | """ Multidimensional Rank Histogram for an ensemble of Firedrake functions given observation
operator.
This is a technique from T. Gneiting, L. I. Stanberry, E. P. Grimit, L. Held and N. A. Johnson.
Assessing probabilistic forecasts of multivariate quantities, with an application to ensemble
predictions of surface winds. Test, 2008. """
from __future__ import division
from __future__ import absolute_import
from firedrake import *
from fade.utils import *
import numpy as np
import matplotlib.pyplot as plot
class rank_histogram(object):
def __init__(self, function_space, N):
""" Can compute the Multidimensional Rank Histogram of an ensemble of Firedrake functions
using observations given by coordinates. NB: All ensemble members must stay same
in the ensemble during all observations. Cannot do this between resampling /
transform assimilation steps.
:arg function_space: The :class:`FunctionSpace` of the ensemble :class:`Function`s
:type function_space: :class:`FunctionSpace`
:arg N: Ensemble size
:type N: int
"""
self.N = N
self.mesh = function_space.mesh()
# define function space and dg0 function of ensemble
self.function_space = function_space
self.dg0_function_space = FunctionSpace(self.mesh,
self.function_space.ufl_element().family(),
self.function_space.ufl_element().degree())
self.dg0_function = Function(self.dg0_function_space)
self.normalizing_function = Function(self.dg0_function_space)
# make ensembles and dg0 ensembles
self.in_ensemble = []
self.in_dg0_ensemble = []
self.inProjectors = []
for i in range(self.N):
self.in_ensemble.append(Function(self.function_space))
self.in_dg0_ensemble.append(Function(self.dg0_function_space))
self.inProjectors.append(Projector(self.in_ensemble[i], self.in_dg0_ensemble[i]))
# define rank list
self.ranks = []
super(rank_histogram, self).__init__()
def __choose_uniform_rank(self, a, b):
# check whole numbers
assert a % 1 == 0
assert b % 1 == 0
rank = a + 1 + np.where(np.random.multinomial(1, (np.ones(b) /
b)) == 1)[0][0]
return rank
def compute_rank(self, ensemble, observation_coords, observations):
"""
:arg ensemble: list of :class:`Function`s in the ensemble
:type ensemble: tuple / list
:arg observation_coords: tuple / list defining the coords of observations
:type observation_coords: tuple / list
:arg observations: tuple / list of observation state values
:type observations: tuple / list
"""
if len(ensemble) < 1:
raise ValueError('ensemble cannot be indexed')
assert len(ensemble) is self.N
# place ensemble into in_ensemble
if ensemble[0].function_space() is not self.function_space:
raise ValueError("ensemble needs to be on same function space as rank " +
"histogram class was initialized with")
for i in range(self.N):
self.in_ensemble[i].assign(ensemble[i])
# number of coordinate observations - proxy for dimensions
ny = len(observations)
# find cells and nodes that contain observations
cells = PointToCell(observation_coords, self.mesh)
nodes = CellToNode(cells, self.dg0_function_space)
unique_nodes = np.unique(nodes)
# project ensemble to dg0 function space
for i in range(self.N):
self.in_ensemble[i].assign(ensemble[i])
self.inProjectors[i].project()
# preallocate a ensemble of state values at coordinates
d = len(unique_nodes)
state_ensemble = np.zeros((d, self.N + 1))
self.normalizing_function.assign(0)
self.dg0_function.assign(0)
for i in range(ny):
# place aggregated observations onto dg0 function space
self.dg0_function.dat.data[nodes[i].astype(int)] += observations[i]
self.normalizing_function.dat.data[nodes[i].astype(int)] += 1.0
# normalize and extract to array of cells with observations in
observedCells = np.divide(self.dg0_function.dat.data[unique_nodes.astype(int)],
self.normalizing_function.dat.data[unique_nodes.astype(int)])
# place observations into state_ensemble
state_ensemble[:, 0] = observedCells
for j in range(self.N):
# calculate an ensemble of scalar state values
state_ensemble[:, j + 1] = self.in_dg0_ensemble[j].dat.data[unique_nodes.astype(int)]
# compute pre-ranks
rho = np.zeros(self.N + 1)
for i in range(self.N + 1):
rho[i] = np.sum(np.prod(np.reshape(state_ensemble[:, i], ((d, 1))) >
state_ensemble, axis=0))
# make start / end points of s to pick uniformly from
s_start = np.sum(rho < rho[0])
s_end = np.sum(rho == rho[0])
# uniform pick of intermediate rank
self.ranks.append(self.__choose_uniform_rank(s_start, s_end) / (self.N + 1))
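    # Illustrative usage sketch (hypothetical names, nothing here depends on it):
    #   rh = rank_histogram(V, n_ensemble)
    #   for obs_coords, obs_vals in observation_stream:
    #       rh.compute_rank(ensemble, obs_coords, obs_vals)
    #   rh.plot_histogram()
    # A roughly flat histogram suggests the ensemble spread is consistent with
    # the observations; U-shaped or dome-shaped histograms indicate under- or
    # over-dispersion respectively.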
def plot_histogram(self, number_of_bins=None):
# define bins
if number_of_bins is None:
bins = np.linspace(0, 1, self.N)
else:
bins = np.linspace(0, 1, number_of_bins)
# plot histogram
n, bins, patches = plot.hist(self.ranks, bins=bins, normed=1,
facecolor='green', alpha=0.75)
plot.xlabel('rank of observation')
plot.ylabel('normalised frequency')
plot.axis([0, 1, 0, 1e-1 + np.max(n)])
plot.show()
""" Iterative and Indexing functions """
def __len__(self):
""" Return the length of the rank array """
        return len(self.ranks)
def __iter__(self):
""" Iterate over the ranks """
for en in self.ranks:
yield en
def __getitem__(self, idx):
""" Return a rank
:arg idx: The index of the rank to return
"""
return self.ranks[idx]
| mit |
huzq/scikit-learn | examples/impute/plot_missing_values.py | 11 | 9687 | """
====================================================
Imputing missing values before building an estimator
====================================================
Missing values can be replaced by the mean, the median or the most frequent
value using the basic :class:`~sklearn.impute.SimpleImputer`.
In this example we will investigate different imputation techniques:
- imputation by the constant value 0
- imputation by the mean value of each feature combined with a missing-ness
indicator auxiliary variable
- k nearest neighbor imputation
- iterative imputation
We will use two datasets: Diabetes dataset which consists of 10 feature
variables collected from diabetes patients with an aim to predict disease
progression and California Housing dataset for which the target is the median
house value for California districts.
As neither of these datasets have missing values, we will remove some
values to create new versions with artificially missing data. The performance
of
:class:`~sklearn.ensemble.RandomForestRegressor` on the full original dataset
is then compared the performance on the altered datasets with the artificially
missing values imputed using different techniques.
"""
print(__doc__)
# Authors: Maria Telenczuk <https://github.com/maikia>
# License: BSD 3 clause
# %%
# Download the data and make missing values sets
################################################
#
# First we download the two datasets. Diabetes dataset is shipped with
# scikit-learn. It has 442 entries, each with 10 features. California Housing
# dataset is much larger with 20640 entries and 8 features. It needs to be
# downloaded. We will only use the first 400 entries for the sake of speeding
# up the calculations but feel free to use the whole dataset.
#
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.datasets import load_diabetes
rng = np.random.RandomState(42)
X_diabetes, y_diabetes = load_diabetes(return_X_y=True)
X_california, y_california = fetch_california_housing(return_X_y=True)
X_california = X_california[:400]
y_california = y_california[:400]
def add_missing_values(X_full, y_full):
n_samples, n_features = X_full.shape
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(n_samples * missing_rate)
missing_samples = np.zeros(n_samples, dtype=bool)
missing_samples[: n_missing_samples] = True
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
X_missing = X_full.copy()
X_missing[missing_samples, missing_features] = np.nan
y_missing = y_full.copy()
return X_missing, y_missing
X_miss_california, y_miss_california = add_missing_values(
X_california, y_california)
X_miss_diabetes, y_miss_diabetes = add_missing_values(
X_diabetes, y_diabetes)
# %%
# Impute the missing data and score
# #################################
# Now we will write a function which will score the results on the differently
# imputed data. Let's look at each imputer separately:
#
rng = np.random.RandomState(0)
from sklearn.ensemble import RandomForestRegressor
# To use the experimental IterativeImputer, we need to explicitly ask for it:
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.impute import SimpleImputer, KNNImputer, IterativeImputer
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
N_SPLITS = 5
regressor = RandomForestRegressor(random_state=0)
# %%
# Missing information
# -------------------
# In addition to imputing the missing values, the imputers have an
# `add_indicator` parameter that marks the values that were missing, which
# might carry some information.
#
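# As a small illustration (values shown are not computed in this script): for a
# single feature ``[[7], [np.nan], [3]]``, ``SimpleImputer(add_indicator=True)``
# with the default mean strategy returns ``[[7., 0.], [5., 1.], [3., 0.]]``,
# i.e. the imputed column plus a binary column flagging which entries were
# originally missing.
#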
def get_scores_for_imputer(imputer, X_missing, y_missing):
estimator = make_pipeline(imputer, regressor)
impute_scores = cross_val_score(estimator, X_missing, y_missing,
scoring='neg_mean_squared_error',
cv=N_SPLITS)
return impute_scores
x_labels = ['Full data',
'Zero imputation',
'Mean Imputation',
'KNN Imputation',
'Iterative Imputation']
mses_california = np.zeros(5)
stds_california = np.zeros(5)
mses_diabetes = np.zeros(5)
stds_diabetes = np.zeros(5)
# %%
# Estimate the score
# ------------------
# First, we want to estimate the score on the original data:
#
def get_full_score(X_full, y_full):
full_scores = cross_val_score(regressor, X_full, y_full,
scoring='neg_mean_squared_error',
cv=N_SPLITS)
return full_scores.mean(), full_scores.std()
mses_california[0], stds_california[0] = get_full_score(X_california,
y_california)
mses_diabetes[0], stds_diabetes[0] = get_full_score(X_diabetes, y_diabetes)
# %%
# Replace missing values by 0
# ---------------------------
#
# Now we will estimate the score on the data where the missing values are
# replaced by 0:
#
def get_impute_zero_score(X_missing, y_missing):
imputer = SimpleImputer(missing_values=np.nan, add_indicator=True,
strategy='constant', fill_value=0)
zero_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
return zero_impute_scores.mean(), zero_impute_scores.std()
mses_california[1], stds_california[1] = get_impute_zero_score(
X_miss_california, y_miss_california)
mses_diabetes[1], stds_diabetes[1] = get_impute_zero_score(X_miss_diabetes,
y_miss_diabetes)
# %%
# kNN-imputation of the missing values
# ------------------------------------
#
# :class:`~sklearn.impute.KNNImputer` imputes missing values using the weighted
# or unweighted mean of the desired number of nearest neighbors.
def get_impute_knn_score(X_missing, y_missing):
imputer = KNNImputer(missing_values=np.nan, add_indicator=True)
knn_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
return knn_impute_scores.mean(), knn_impute_scores.std()
mses_california[2], stds_california[2] = get_impute_knn_score(
X_miss_california, y_miss_california)
mses_diabetes[2], stds_diabetes[2] = get_impute_knn_score(X_miss_diabetes,
y_miss_diabetes)
# %%
# Impute missing values with mean
# -------------------------------
#
def get_impute_mean(X_missing, y_missing):
imputer = SimpleImputer(missing_values=np.nan, strategy="mean",
add_indicator=True)
mean_impute_scores = get_scores_for_imputer(imputer, X_missing, y_missing)
return mean_impute_scores.mean(), mean_impute_scores.std()
mses_california[3], stds_california[3] = get_impute_mean(X_miss_california,
y_miss_california)
mses_diabetes[3], stds_diabetes[3] = get_impute_mean(X_miss_diabetes,
y_miss_diabetes)
# %%
# Iterative imputation of the missing values
# ------------------------------------------
#
# Another option is the :class:`~sklearn.impute.IterativeImputer`. This uses
# round-robin linear regression, modeling each feature with missing values as a
# function of other features, in turn.
# The version implemented assumes Gaussian (output) variables. If your features
# are obviously non-normal, consider transforming them to look more normal
# to potentially improve performance.
#
def get_impute_iterative(X_missing, y_missing):
imputer = IterativeImputer(missing_values=np.nan, add_indicator=True,
random_state=0, n_nearest_features=5,
sample_posterior=True)
iterative_impute_scores = get_scores_for_imputer(imputer,
X_missing,
y_missing)
return iterative_impute_scores.mean(), iterative_impute_scores.std()
mses_california[4], stds_california[4] = get_impute_iterative(
X_miss_california, y_miss_california)
mses_diabetes[4], stds_diabetes[4] = get_impute_iterative(X_miss_diabetes,
y_miss_diabetes)
mses_diabetes = mses_diabetes * -1
mses_california = mses_california * -1
# %%
# Plot the results
# ################
#
# Finally we are going to visualize the score:
#
import matplotlib.pyplot as plt
n_bars = len(mses_diabetes)
xval = np.arange(n_bars)
colors = ['r', 'g', 'b', 'orange', 'black']
# plot diabetes results
plt.figure(figsize=(12, 6))
ax1 = plt.subplot(121)
for j in xval:
ax1.barh(j, mses_diabetes[j], xerr=stds_diabetes[j],
color=colors[j], alpha=0.6, align='center')
ax1.set_title('Imputation Techniques with Diabetes Data')
ax1.set_xlim(left=np.min(mses_diabetes) * 0.9,
right=np.max(mses_diabetes) * 1.1)
ax1.set_yticks(xval)
ax1.set_xlabel('MSE')
ax1.invert_yaxis()
ax1.set_yticklabels(x_labels)
# plot california dataset results
ax2 = plt.subplot(122)
for j in xval:
ax2.barh(j, mses_california[j], xerr=stds_california[j],
color=colors[j], alpha=0.6, align='center')
ax2.set_title('Imputation Techniques with California Data')
ax2.set_yticks(xval)
ax2.set_xlabel('MSE')
ax2.invert_yaxis()
ax2.set_yticklabels([''] * n_bars)
plt.show()
# You can also try different techniques. For instance, the median is a more
# robust estimator for data with high magnitude variables which could dominate
# results (otherwise known as a 'long tail').
| bsd-3-clause |
synthicity/activitysim | activitysim/abm/models/trip_scheduling.py | 2 | 20559 | # ActivitySim
# See full license in LICENSE.txt.
from __future__ import (absolute_import, division, print_function, )
from future.standard_library import install_aliases
install_aliases() # noqa: E402
from builtins import range
import logging
import numpy as np
import pandas as pd
from activitysim.core import logit
from activitysim.core import config
from activitysim.core import inject
from activitysim.core import tracing
from activitysim.core import chunk
from activitysim.core import pipeline
from activitysim.core.util import assign_in_place
from .util import expressions
from activitysim.core.util import reindex
from activitysim.abm.models.util.trip import failed_trip_cohorts
from activitysim.abm.models.util.trip import cleanup_failed_trips
logger = logging.getLogger(__name__)
"""
StopDepartArrivePeriodModel
StopDepartArriveProportions.csv
tourpurp,isInbound,interval,trip,p1,p2,p3,p4,p5...p40
"""
NO_TRIP_ID = 0
NO_DEPART = 0
DEPART_ALT_BASE = 'DEPART_ALT_BASE'
FAILFIX = 'FAILFIX'
FAILFIX_CHOOSE_MOST_INITIAL = 'choose_most_initial'
FAILFIX_DROP_AND_CLEANUP = 'drop_and_cleanup'
FAILFIX_DEFAULT = FAILFIX_CHOOSE_MOST_INITIAL
def set_tour_hour(trips, tours):
"""
add columns 'tour_hour', 'earliest', 'latest' to trips
Parameters
----------
trips: pd.DataFrame
tours: pd.DataFrame
Returns
-------
modifies trips in place
"""
# all trips must depart between tour start and end
trips['earliest'] = reindex(tours.start, trips.tour_id)
trips['latest'] = reindex(tours.end, trips.tour_id)
# tour_hour is start for outbound trips, and end for inbound trips
trips['tour_hour'] = np.where(
trips.outbound,
trips['earliest'],
trips['latest']).astype(np.int8)
# subtours indexed by parent_tour_id
subtours = tours.loc[tours.primary_purpose == 'atwork',
['tour_num', 'tour_count', 'parent_tour_id', 'start', 'end']]
subtours = subtours.astype(int).set_index('parent_tour_id')
# bool series
trip_has_subtours = trips.tour_id.isin(subtours.index)
outbound = trip_has_subtours & trips.outbound
trips.loc[outbound, 'latest'] = \
reindex(subtours[subtours.tour_num == 1]['start'], trips[outbound].tour_id)
inbound = trip_has_subtours & ~trips.outbound
trips.loc[inbound, 'earliest'] = \
reindex(subtours[subtours.tour_num == subtours.tour_count]['end'], trips[inbound].tour_id)
def clip_probs(trips, probs, model_settings):
"""
zero out probs before trips.earliest or after trips.latest
Parameters
----------
trips: pd.DataFrame
probs: pd.DataFrame
one row per trip, one column per time period, with float prob of picking that time period
depart_alt_base: int
int to add to probs column index to get time period it represents.
e.g. depart_alt_base = 5 means first column (column 0) represents 5 am
Returns
-------
probs: pd.DataFrame
clipped version of probs
"""
depart_alt_base = model_settings.get(DEPART_ALT_BASE)
# there should be one row in probs per trip
assert trips.shape[0] == probs.shape[0]
# probs should sum to 1 across rows before clipping
probs = probs.div(probs.sum(axis=1), axis=0)
num_rows, num_cols = probs.shape
ix_map = np.tile(np.arange(0, num_cols), num_rows).reshape(num_rows, num_cols) + depart_alt_base
# 5 6 7 8 9 10...
# 5 6 7 8 9 10...
# 5 6 7 8 9 10...
clip_mask = ((ix_map >= trips.earliest.values.reshape(num_rows, 1)) &
(ix_map <= trips.latest.values.reshape(num_rows, 1))) * 1
# [0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0 0]
# [0 0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0]
# [0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0]...
probs = probs*clip_mask
return probs
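# Worked example (illustrative numbers only): with DEPART_ALT_BASE = 5, a trip
# whose earliest/latest window is 7-8 keeps only the columns for hours 7 and 8,
# so row probabilities [.2, .2, .3, .3, 0, ...] over hours 5, 6, 7, 8, 9, ...
# become [0, 0, .3, .3, 0, ...]; schedule_nth_trips() below then renormalizes
# (for the first trip in a leg) or routes the lost mass to a 'fail' alternative.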
def report_bad_choices(bad_row_map, df, filename, trace_label, trace_choosers=None):
"""
Parameters
----------
bad_row_map
df : pandas.DataFrame
utils or probs dataframe
trace_choosers : pandas.dataframe
the choosers df (for interaction_simulate) to facilitate the reporting of hh_id
because we can't deduce hh_id from the interaction_dataset which is indexed on index
values from alternatives df
"""
df = df[bad_row_map]
if trace_choosers is None:
hh_ids = tracing.hh_id_for_chooser(df.index, df)
else:
hh_ids = tracing.hh_id_for_chooser(df.index, trace_choosers)
df['household_id'] = hh_ids
filename = "%s.%s" % (trace_label, filename)
logger.info("dumping %s" % filename)
tracing.write_csv(df, file_name=filename, transpose=False)
# log the indexes of the first MAX_PRINT offending rows
MAX_PRINT = 0
for idx in df.index[:MAX_PRINT].values:
row_msg = "%s : failed %s = %s (hh_id = %s)" % \
(trace_label, df.index.name, idx, df.household_id.loc[idx])
logger.warning(row_msg)
def schedule_nth_trips(
trips,
probs_spec,
model_settings,
first_trip_in_leg,
report_failed_trips,
trace_hh_id,
trace_label):
"""
We join each trip with the appropriate row in probs_spec by joining on probs_join_cols,
which should exist in both trips, probs_spec dataframe.
Parameters
----------
trips: pd.DataFrame
probs_spec: pd.DataFrame
Dataframe of probs for choice of depart times and join columns to match them with trips.
Depart columns names are irrelevant. Instead, they are position dependent,
time period choice is their index + depart_alt_base
depart_alt_base: int
int to add to probs column index to get time period it represents.
e.g. depart_alt_base = 5 means first column (column 0) represents 5 am
report_failed_trips : bool
trace_hh_id
trace_label
Returns
-------
choices: pd.Series
time periods depart choices, one per trip (except for trips with zero probs)
"""
depart_alt_base = model_settings.get('DEPART_ALT_BASE')
probs_join_cols = ['primary_purpose', 'outbound', 'tour_hour', 'trip_num']
probs_cols = [c for c in probs_spec.columns if c not in probs_join_cols]
# left join trips to probs (there may be multiple rows per trip for multiple depart ranges)
choosers = pd.merge(trips.reset_index(), probs_spec, on=probs_join_cols,
how='left').set_index('trip_id')
chunk.log_df(trace_label, "choosers", choosers)
if trace_hh_id and tracing.has_trace_targets(trips):
tracing.trace_df(choosers, '%s.choosers' % trace_label)
# choosers should now match trips row for row
assert choosers.index.is_unique
assert len(choosers.index) == len(trips.index)
# zero out probs outside earliest-latest window
chooser_probs = clip_probs(trips, choosers[probs_cols], model_settings)
chunk.log_df(trace_label, "chooser_probs", chooser_probs)
if first_trip_in_leg:
# probs should sum to 1 unless all zero
chooser_probs = chooser_probs.div(chooser_probs.sum(axis=1), axis=0).fillna(0)
# probs should sum to 1 with residual probs resulting in choice of 'fail'
chooser_probs['fail'] = 1 - chooser_probs.sum(axis=1).clip(0, 1)
if trace_hh_id and tracing.has_trace_targets(trips):
tracing.trace_df(chooser_probs, '%s.chooser_probs' % trace_label)
choices, rands = logit.make_choices(
chooser_probs,
trace_label=trace_label, trace_choosers=choosers)
chunk.log_df(trace_label, "choices", choices)
chunk.log_df(trace_label, "rands", rands)
if trace_hh_id and tracing.has_trace_targets(trips):
tracing.trace_df(choices, '%s.choices' % trace_label, columns=[None, 'depart'])
tracing.trace_df(rands, '%s.rands' % trace_label, columns=[None, 'rand'])
# convert alt choice index to depart time (setting failed choices to -1)
failed = (choices == chooser_probs.columns.get_loc('fail'))
choices = (choices + depart_alt_base).where(~failed, -1)
chunk.log_df(trace_label, "failed", failed)
# report failed trips while we have the best diagnostic info
if report_failed_trips and failed.any():
report_bad_choices(
bad_row_map=failed,
df=choosers,
filename='failed_choosers',
trace_label=trace_label,
trace_choosers=None)
# trace before removing failures
if trace_hh_id and tracing.has_trace_targets(trips):
tracing.trace_df(choices, '%s.choices' % trace_label, columns=[None, 'depart'])
tracing.trace_df(rands, '%s.rands' % trace_label, columns=[None, 'rand'])
# remove any failed choices
if failed.any():
choices = choices[~failed]
assert (choices >= trips.earliest[~failed]).all()
assert (choices <= trips.latest[~failed]).all()
return choices
def schedule_trips_in_leg(
outbound,
trips,
probs_spec,
model_settings,
last_iteration,
trace_hh_id, trace_label):
"""
Parameters
----------
outbound
trips
probs_spec
depart_alt_base
last_iteration
trace_hh_id
trace_label
Returns
-------
choices: pd.Series
depart choice for trips, indexed by trip_id
"""
failfix = model_settings.get(FAILFIX, FAILFIX_DEFAULT)
# logger.debug("%s scheduling %s trips" % (trace_label, trips.shape[0]))
assert (trips.outbound == outbound).all()
# initial trip of leg and all atwork trips get tour_hour
is_initial = (trips.trip_num == 1) if outbound else (trips.trip_num == trips.trip_count)
no_scheduling = is_initial | (trips.primary_purpose == 'atwork')
choices = trips.tour_hour[no_scheduling]
if no_scheduling.all():
return choices
result_list = []
result_list.append(choices)
trips = trips[~no_scheduling]
# add next_trip_id temp column (temp as trips is now a copy, as result of slicing)
trips = trips.sort_index()
trips['next_trip_id'] = np.roll(trips.index, -1 if outbound else 1)
is_final = (trips.trip_num == trips.trip_count) if outbound else (trips.trip_num == 1)
    trips.next_trip_id = trips.next_trip_id.where(~is_final, NO_TRIP_ID)
# iterate over outbound trips in ascending trip_num order, skipping the initial trip
    # iterate over inbound trips in descending trip_num order, skipping the final trip
first_trip_in_leg = True
for i in range(trips.trip_num.min(), trips.trip_num.max() + 1):
if outbound:
nth_trips = trips[trips.trip_num == i]
else:
nth_trips = trips[trips.trip_num == trips.trip_count - i]
nth_trace_label = tracing.extend_trace_label(trace_label, 'num_%s' % i)
chunk.log_open(nth_trace_label, chunk_size=0, effective_chunk_size=0)
choices = schedule_nth_trips(
nth_trips,
probs_spec,
model_settings,
first_trip_in_leg=first_trip_in_leg,
report_failed_trips=last_iteration,
trace_hh_id=trace_hh_id,
trace_label=nth_trace_label)
chunk.log_close(nth_trace_label)
# if outbound, this trip's depart constrains next trip's earliest depart option
# if inbound, we are handling in reverse order, so it constrains latest depart instead
ADJUST_NEXT_DEPART_COL = 'earliest' if outbound else 'latest'
# most initial departure (when no choice was made because all probs were zero)
if last_iteration and (failfix == FAILFIX_CHOOSE_MOST_INITIAL):
choices = choices.reindex(nth_trips.index)
logger.warning("%s coercing %s depart choices to most initial" %
(nth_trace_label, choices.isna().sum()))
choices = choices.fillna(trips[ADJUST_NEXT_DEPART_COL])
# adjust allowed depart range of next trip
has_next_trip = (nth_trips.next_trip_id != NO_TRIP_ID)
if has_next_trip.any():
next_trip_ids = nth_trips.next_trip_id[has_next_trip]
# update next trip's earliest/latest from this trip's depart choice; where this trip failed (no choice), fall back to its existing bound
trips.loc[next_trip_ids, ADJUST_NEXT_DEPART_COL] = \
choices.reindex(next_trip_ids.index).fillna(trips[ADJUST_NEXT_DEPART_COL]).values
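# e.g. (hypothetical, outbound leg) if this trip chose to depart at hour 9, its successor's
# 'earliest' becomes 9, so the successor cannot be scheduled to depart before this trip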
result_list.append(choices)
first_trip_in_leg = False
if len(result_list) > 1:
choices = pd.concat(result_list)
return choices
def trip_scheduling_rpc(chunk_size, choosers, spec, trace_label):
# NOTE we chunk by chunk_id so all trips in a tour stay in the same chunk
num_choosers = choosers['chunk_id'].max() + 1
# if not chunking, then return num_choosers
# if chunk_size == 0:
# return num_choosers, 0
# extra columns from spec
extra_columns = spec.shape[1]
chooser_row_size = choosers.shape[1] + extra_columns
# scale row_size by average number of chooser rows per chunk_id
rows_per_chunk_id = choosers.shape[0] / num_choosers
row_size = (rows_per_chunk_id * chooser_row_size)
# print "num_choosers", num_choosers
# print "choosers.shape", choosers.shape
# print "rows_per_chunk_id", rows_per_chunk_id
# print "chooser_row_size", chooser_row_size
# print "(rows_per_chunk_id * chooser_row_size)", (rows_per_chunk_id * chooser_row_size)
# print "row_size", row_size
# #bug
return chunk.rows_per_chunk(chunk_size, row_size, num_choosers, trace_label)
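# worked example with hypothetical numbers: 1000 trip rows over 250 chunk_ids, 20 chooser
# columns and 5 spec columns give rows_per_chunk_id = 4 and row_size = 4 * (20 + 5) = 100,
# which chunk.rows_per_chunk() then weighs against the available chunk_size budget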
def run_trip_scheduling(
trips,
tours,
probs_spec,
model_settings,
last_iteration,
chunk_size,
trace_hh_id,
trace_label):
set_tour_hour(trips, tours)
rows_per_chunk, effective_chunk_size = \
trip_scheduling_rpc(chunk_size, trips, probs_spec, trace_label)
result_list = []
for i, num_chunks, trips_chunk in chunk.chunked_choosers_by_chunk_id(trips, rows_per_chunk):
if num_chunks > 1:
chunk_trace_label = tracing.extend_trace_label(trace_label, 'chunk_%s' % i)
logger.info("%s of %s size %d" % (chunk_trace_label, num_chunks, len(trips_chunk)))
else:
chunk_trace_label = trace_label
leg_trace_label = tracing.extend_trace_label(chunk_trace_label, 'outbound')
chunk.log_open(leg_trace_label, chunk_size, effective_chunk_size)
choices = \
schedule_trips_in_leg(
outbound=True,
trips=trips_chunk[trips_chunk.outbound],
probs_spec=probs_spec,
model_settings=model_settings,
last_iteration=last_iteration,
trace_hh_id=trace_hh_id,
trace_label=leg_trace_label)
result_list.append(choices)
chunk.log_close(leg_trace_label)
leg_trace_label = tracing.extend_trace_label(chunk_trace_label, 'inbound')
chunk.log_open(leg_trace_label, chunk_size, effective_chunk_size)
choices = \
schedule_trips_in_leg(
outbound=False,
trips=trips_chunk[~trips_chunk.outbound],
probs_spec=probs_spec,
model_settings=model_settings,
last_iteration=last_iteration,
trace_hh_id=trace_hh_id,
trace_label=leg_trace_label)
result_list.append(choices)
chunk.log_close(leg_trace_label)
choices = pd.concat(result_list)
return choices
@inject.step()
def trip_scheduling(
trips,
tours,
chunk_size,
trace_hh_id):
"""
Trip scheduling assigns depart times for trips within the start, end limits of the tour.
The algorithm is simplistic:
The first outbound trip starts at the tour start time, and subsequent outbound trips are
processed in trip_num order, to ensure that subsequent trips do not depart before the
trip that precedes them.
Inbound trips are handled similarly, except in reverse order, starting with the last trip,
and working backwards to ensure that inbound trips do not depart after the trip that
succeeds them.
The probability spec assigns probabilities for depart times, but those possible departs must
be clipped to disallow depart times outside the tour limits, the departs of prior trips, and
in the case of work tours, the start/end times of any atwork subtours.
Scheduling can fail if the probability table assigns zero probabilities to all the available
depart times in a trip's depart window. (This could be avoided by giving every window a small
probability, rather than zero, but the existing mtctm1 prob spec does not do this. I believe
this is due to its having been generated from a small household travel survey sample
that lacked any departs for some time periods.)
Rescheduling the trips that fail (along with their inbound or outbound leg-mates) can sometimes
fix this problem, if it was caused by an earlier trip's depart choice blocking a subsequent
trip's ability to schedule a depart within the resulting window. But it can also happen if
a tour is very short (e.g. one time period) and the prob spec has a zero probability for
that tour hour.
Therefore we need to handle trips that could not be scheduled. There are two ways (at least)
to solve this problem:
1) CHOOSE_MOST_INITIAL
simply assign a depart time to the trip, even if it has a zero probability. It makes
most sense, in this case, to assign the 'most initial' depart time, so that subsequent trips
are minimally impacted. This can be done in the final iteration, thus affecting only the
trips that could not be scheduled by the standard approach.
2) drop_and_cleanup
drop trips that could not be scheduled, and adjust their leg mates, as is done for failed
trips in trip_destination.
For now we are choosing among these approaches with a manifest constant, but this could
be made a model setting...
"""
trace_label = "trip_scheduling"
model_settings = config.read_model_settings('trip_scheduling.yaml')
assert 'DEPART_ALT_BASE' in model_settings
failfix = model_settings.get(FAILFIX, FAILFIX_DEFAULT)
probs_spec = pd.read_csv(config.config_file_path('trip_scheduling_probs.csv'), comment='#')
trips_df = trips.to_frame()
tours = tours.to_frame()
# add tour-based chunk_id so we can chunk all trips in tour together
trips_df['chunk_id'] = \
reindex(pd.Series(list(range(tours.shape[0])), tours.index), trips_df.tour_id)
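# e.g. (hypothetical) tours indexed [10, 11, 12] get chunk_ids [0, 1, 2], and every trip
# with tour_id 11 inherits chunk_id 1, so a tour's trips always land in the same chunk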
max_iterations = model_settings.get('MAX_ITERATIONS', 1)
assert max_iterations > 0
choices_list = []
i = 0
while (i < max_iterations) and not trips_df.empty:
i += 1
last_iteration = (i == max_iterations)
trace_label_i = tracing.extend_trace_label(trace_label, "i%s" % i)
logger.info("%s scheduling %s trips", trace_label_i, trips_df.shape[0])
choices = \
run_trip_scheduling(
trips_df,
tours,
probs_spec,
model_settings,
last_iteration=last_iteration,
trace_hh_id=trace_hh_id,
chunk_size=chunk_size,
trace_label=trace_label_i)
# boolean series of trips whose individual trip scheduling failed
failed = choices.reindex(trips_df.index).isnull()
logger.info("%s %s failed", trace_label_i, failed.sum())
if not last_iteration:
# boolean series of trips whose leg scheduling failed
failed_cohorts = failed_trip_cohorts(trips_df, failed)
trips_df = trips_df[failed_cohorts]
choices = choices[~failed_cohorts]
choices_list.append(choices)
trips_df = trips.to_frame()
choices = pd.concat(choices_list)
choices = choices.reindex(trips_df.index)
if choices.isnull().any():
logger.warning("%s of %s trips could not be scheduled after %s iterations" %
(choices.isnull().sum(), trips_df.shape[0], i))
if failfix != FAILFIX_DROP_AND_CLEANUP:
raise RuntimeError("%s setting '%s' not enabled in settings" %
(FAILFIX, FAILFIX_DROP_AND_CLEANUP))
trips_df['failed'] = choices.isnull()
trips_df = cleanup_failed_trips(trips_df)
choices = choices.reindex(trips_df.index)
trips_df['depart'] = choices
assert not trips_df.depart.isnull().any()
pipeline.replace_table("trips", trips_df)
| agpl-3.0 |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/io/clipboard/clipboard.py | 14 | 3793 | """ io on the clipboard """
from pandas import compat, get_option, option_context, DataFrame
from pandas.compat import StringIO, PY2
def read_clipboard(sep='\s+', **kwargs): # pragma: no cover
r"""
Read text from clipboard and pass to read_table. See read_table for the
full argument list
Parameters
----------
sep : str, default '\s+'.
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
Returns
-------
parsed : DataFrame
"""
encoding = kwargs.pop('encoding', 'utf-8')
# only utf-8 is valid for passed value because that's what clipboard
# supports
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise NotImplementedError(
'reading from clipboard only supports utf-8 encoding')
from pandas.io.clipboard import clipboard_get
from pandas.io.parsers import read_table
text = clipboard_get()
# try to decode (if needed on PY3)
# Strange. linux py33 doesn't complain, win py33 does
if compat.PY3:
try:
text = compat.bytes_to_str(
text, encoding=(kwargs.get('encoding') or
get_option('display.encoding'))
)
except:
pass
# Excel copies into clipboard with \t separation
# inspect no more than the first 10 lines; if they
# all contain an equal number (>0) of tabs, infer
# that this came from excel and set 'sep' accordingly
lines = text[:10000].split('\n')[:-1][:10]
# Need to remove leading white space, since read_table
# accepts:
# a b
# 0 1 2
# 1 3 4
counts = set([x.lstrip().count('\t') for x in lines])
if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0:
sep = '\t'
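# e.g. clipboard text 'a\tb\nc\td\n' gives lines ['a\tb', 'c\td'] and counts == {1},
# so the text is treated as tab-separated, as produced by copying a range from Excel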
if sep is None and kwargs.get('delim_whitespace') is None:
sep = '\s+'
return read_table(StringIO(text), sep=sep, **kwargs)
def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover
"""
Attempt to write text representation of object to the system clipboard
The clipboard can be then pasted into Excel for example.
Parameters
----------
obj : the object to write to the clipboard
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows:
- OS X:
"""
encoding = kwargs.pop('encoding', 'utf-8')
# testing if an invalid encoding is passed to clipboard
if encoding is not None and encoding.lower().replace('-', '') != 'utf8':
raise ValueError('clipboard only supports utf-8 encoding')
from pandas.io.clipboard import clipboard_set
if excel is None:
excel = True
if excel:
try:
if sep is None:
sep = '\t'
buf = StringIO()
# clipboard_set (pyperclip) expects unicode
obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs)
text = buf.getvalue()
if PY2:
text = text.decode('utf-8')
clipboard_set(text)
return
except:
pass
if isinstance(obj, DataFrame):
# str(df) has various unhelpful defaults, like truncation
with option_context('display.max_colwidth', 999999):
objstr = obj.to_string(**kwargs)
else:
objstr = str(obj)
clipboard_set(objstr)
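# illustrative round trip (assumes a working clipboard backend, e.g. xclip/xsel on Linux):
#   df = DataFrame({'a': [1, 2], 'b': [3, 4]})
#   to_clipboard(df, excel=True)   # writes tab-separated text that pastes cleanly into Excel
#   df2 = read_clipboard()         # parses the clipboard text back into a DataFrame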
| mit |
frank-tancf/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 14 | 44270 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
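# note: the three-step update above maintains a running mean, i.e. after processing sample i,
# average_weights equals the mean of the per-step SGD weights w_0..w_i (Polyak-Ruppert
# averaging), which is what the estimators fitted with average=True are checked against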
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@raises(ValueError)
def test_sgd_bad_alpha_for_optimal_learning_rate(self):
# Check whether expected ValueError on bad alpha, i.e. 0
# since alpha is used to compute the optimal learning rate
self.factory(alpha=0, learning_rate="optimal")
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
# partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build a very very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
def test_partial_fit_multiclass_average(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
clf.partial_fit(X2[third:], Y2[third:])
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = np.random.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
@ignore_warnings
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.predict([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
| bsd-3-clause |
0x0all/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 21 | 5578 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm does not assume any parametric form of the data
distribution and can therefore model its complex shape much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teatcher ratio by town")
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
MohammedWasim/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
phdowling/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
hammerlab/mhcflurry | mhcflurry/encodable_sequences.py | 1 | 18992 | """
Class for encoding variable-length peptides to fixed-size numerical matrices
"""
from __future__ import (
print_function,
division,
absolute_import,
)
import math
from six import string_types
from functools import partial
import numpy
import pandas
from . import amino_acid
class EncodingError(ValueError):
"""
Exception raised when peptides cannot be encoded
"""
def __init__(self, message, supported_peptide_lengths):
self.supported_peptide_lengths = supported_peptide_lengths
ValueError.__init__(
self,
message + " Supported lengths: %s - %s." % supported_peptide_lengths)
class EncodableSequences(object):
"""
Class for encoding variable-length peptides to fixed-size numerical matrices
This class caches various encodings of a list of sequences.
In practice this is used only for peptides. To encode MHC allele sequences,
see AlleleEncoding.
"""
unknown_character = "X"
@classmethod
def create(klass, sequences):
"""
Factory that returns an EncodableSequences given a list of
strings. As a convenience, you can also pass it an EncodableSequences
instance, in which case the object is returned unchanged.
"""
if isinstance(sequences, klass):
return sequences
return klass(sequences)
def __init__(self, sequences):
if not all(isinstance(obj, string_types) for obj in sequences):
raise ValueError("Sequence of strings is required")
self.sequences = numpy.array(sequences)
lengths = pandas.Series(self.sequences, dtype=numpy.object_).str.len()
self.min_length = lengths.min()
self.max_length = lengths.max()
self.encoding_cache = {}
self.fixed_sequence_length = None
if len(self.sequences) > 0 and all(
len(s) == len(self.sequences[0]) for s in self.sequences):
self.fixed_sequence_length = len(self.sequences[0])
def __len__(self):
return len(self.sequences)
def variable_length_to_fixed_length_categorical(
self,
alignment_method="pad_middle",
left_edge=4,
right_edge=4,
max_length=15):
"""
Encode variable-length sequences to a fixed-size index-encoded (integer)
matrix.
See `sequences_to_fixed_length_index_encoded_array` for details.
Parameters
----------
alignment_method : string
One of "pad_middle" or "left_pad_right_pad"
left_edge : int, size of fixed-position left side
Only relevant for pad_middle alignment method
right_edge : int, size of the fixed-position right side
Only relevant for pad_middle alignment method
max_length : maximum supported peptide length
Returns
-------
numpy.array of integers with shape (num sequences, encoded length)
For pad_middle, the encoded length is max_length. For left_pad_right_pad,
it's 3 * max_length.
"""
cache_key = (
"fixed_length_categorical",
alignment_method,
left_edge,
right_edge,
max_length)
if cache_key not in self.encoding_cache:
fixed_length_sequences = (
self.sequences_to_fixed_length_index_encoded_array(
self.sequences,
alignment_method=alignment_method,
left_edge=left_edge,
right_edge=right_edge,
max_length=max_length))
self.encoding_cache[cache_key] = fixed_length_sequences
return self.encoding_cache[cache_key]
def variable_length_to_fixed_length_vector_encoding(
self,
vector_encoding_name,
alignment_method="pad_middle",
left_edge=4,
right_edge=4,
max_length=15,
trim=False,
allow_unsupported_amino_acids=False):
"""
Encode variable-length sequences to a fixed-size matrix. Amino acids
are encoded as specified by the vector_encoding_name argument.
See `sequences_to_fixed_length_index_encoded_array` for details.
See also: variable_length_to_fixed_length_categorical.
Parameters
----------
vector_encoding_name : string
How to represent amino acids.
One of "BLOSUM62", "one-hot", etc. Full list of supported vector
encodings is given by available_vector_encodings().
alignment_method : string
One of "pad_middle" or "left_pad_right_pad"
left_edge : int
Size of fixed-position left side.
Only relevant for pad_middle alignment method
right_edge : int
Size of the fixed-position right side.
Only relevant for pad_middle alignment method
max_length : int
Maximum supported peptide length
trim : bool
If True, longer sequences will be trimmed to fit the maximum
supported length. Not supported for all alignment methods.
allow_unsupported_amino_acids : bool
If True, non-canonical amino acids will be replaced with the X
character before encoding.
Returns
-------
numpy.array with shape (num sequences, encoded length, m)
where
- m is the vector encoding length (usually 21).
- encoded length is max_length if alignment_method is pad_middle;
3 * max_length if it's left_pad_right_pad.
"""
cache_key = (
"fixed_length_vector_encoding",
vector_encoding_name,
alignment_method,
left_edge,
right_edge,
max_length,
trim,
allow_unsupported_amino_acids)
if cache_key not in self.encoding_cache:
fixed_length_sequences = (
self.sequences_to_fixed_length_index_encoded_array(
self.sequences,
alignment_method=alignment_method,
left_edge=left_edge,
right_edge=right_edge,
max_length=max_length,
trim=trim,
allow_unsupported_amino_acids=allow_unsupported_amino_acids))
result = amino_acid.fixed_vectors_encoding(
fixed_length_sequences,
amino_acid.ENCODING_DATA_FRAMES[vector_encoding_name])
assert result.shape[0] == len(self.sequences)
self.encoding_cache[cache_key] = result
return self.encoding_cache[cache_key]
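    # Illustrative usage sketch (added comment, not part of the original API
    # surface; the peptide strings are made up, shapes follow the docstring above):
    #
    #   encodable = EncodableSequences.create(["SIINFEKL", "SIINFEKLL"])
    #   X = encodable.variable_length_to_fixed_length_vector_encoding(
    #       "BLOSUM62", alignment_method="pad_middle", max_length=15)
    #   # X.shape == (2, 15, m), with m the per-residue vector length
    #   # (usually 21: the 20 canonical amino acids plus the X character).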
@classmethod
def sequences_to_fixed_length_index_encoded_array(
klass,
sequences,
alignment_method="pad_middle",
left_edge=4,
right_edge=4,
max_length=15,
trim=False,
allow_unsupported_amino_acids=False):
"""
Encode variable-length sequences to a fixed-size index-encoded (integer)
matrix.
How variable length sequences get mapped to fixed length is set by the
"alignment_method" argument. Supported alignment methods are:
pad_middle
Encoding designed for preserving the anchor positions of class
I peptides. This is what is used in allele-specific models.
Each string must be of length at least left_edge + right_edge
and at most max_length. The first left_edge characters in the
input always map to the first left_edge characters in the
output. Similarly for the last right_edge characters. The
middle characters are filled in based on the length, with the
X character filling in the blanks.
Example:
AAAACDDDD -> AAAAXXXCXXXDDDD
left_pad_centered_right_pad
Encoding that makes no assumptions on anchor positions but is
3x larger than pad_middle, since it duplicates the peptide
(left aligned + centered + right aligned). This is what is used
for the pan-allele models.
Example:
AAAACDDDD -> AAAACDDDDXXXXXXXXXAAAACDDDDXXXXXXXXXAAAACDDDD
left_pad_right_pad
Same as left_pad_centered_right_pad but only includes left-
and right-padded peptide.
Example:
AAAACDDDD -> AAAACDDDDXXXXXXXXXXXXAAAACDDDD
Parameters
----------
sequences : list of string
alignment_method : string
One of "pad_middle" or "left_pad_right_pad"
left_edge : int
Size of fixed-position left side.
Only relevant for pad_middle alignment method
right_edge : int
Size of the fixed-position right side.
Only relevant for pad_middle alignment method
max_length : int
maximum supported peptide length
trim : bool
If True, longer sequences will be trimmed to fit the maximum
supported length. Not supported for all alignment methods.
allow_unsupported_amino_acids : bool
If True, non-canonical amino acids will be replaced with the X
character before encoding.
Returns
-------
numpy.array of integers with shape (num sequences, encoded length)
For pad_middle, the encoded length is max_length. For left_pad_right_pad,
it's 2 * max_length. For left_pad_centered_right_pad, it's
3 * max_length.
"""
if allow_unsupported_amino_acids:
fill_value = amino_acid.AMINO_ACID_INDEX['X']
def get_amino_acid_index(a):
return amino_acid.AMINO_ACID_INDEX.get(a, fill_value)
else:
get_amino_acid_index = amino_acid.AMINO_ACID_INDEX.__getitem__
result = None
if alignment_method == 'pad_middle':
if trim:
raise NotImplementedError("trim not supported")
# Result array is int32, filled with X (null amino acid) value.
result = numpy.full(
fill_value=amino_acid.AMINO_ACID_INDEX['X'],
shape=(len(sequences), max_length),
dtype="int32")
df = pandas.DataFrame({"peptide": sequences}, dtype=numpy.object_)
df["length"] = df.peptide.str.len()
middle_length = max_length - left_edge - right_edge
min_length = left_edge + right_edge
# For efficiency we handle each supported peptide length using bulk
# array operations.
for (length, sub_df) in df.groupby("length"):
if length < min_length or length > max_length:
raise EncodingError(
"Sequence '%s' (length %d) unsupported. There are %d "
"total peptides with this length." % (
sub_df.iloc[0].peptide,
length,
len(sub_df)), supported_peptide_lengths=(
min_length, max_length))
# Array of shape (num peptides, length) giving fixed-length
# amino acid encoding each peptide of the current length.
fixed_length_sequences = numpy.stack(
sub_df.peptide.map(
lambda s: numpy.array([
get_amino_acid_index(char) for char in s
])).values)
num_null = max_length - length
num_null_left = int(math.ceil(num_null / 2))
num_middle_filled = middle_length - num_null
middle_start = left_edge + num_null_left
# Set left edge
result[sub_df.index, :left_edge] = fixed_length_sequences[
:, :left_edge
]
# Set middle.
result[
sub_df.index,
middle_start : middle_start + num_middle_filled
] = fixed_length_sequences[
:, left_edge : left_edge + num_middle_filled
]
# Set right edge.
result[
sub_df.index,
-right_edge:
] = fixed_length_sequences[:, -right_edge:]
elif alignment_method == "left_pad_right_pad":
if trim:
raise NotImplementedError("trim not supported")
# We arbitrarily set a minimum length of 5, although this encoding
# could handle smaller peptides.
min_length = 5
# Result array is int32, filled with X (null amino acid) value.
result = numpy.full(
fill_value=amino_acid.AMINO_ACID_INDEX['X'],
shape=(len(sequences), max_length * 2),
dtype="int32")
df = pandas.DataFrame({"peptide": sequences}, dtype=numpy.object_)
# For efficiency we handle each supported peptide length using bulk
# array operations.
for (length, sub_df) in df.groupby(df.peptide.str.len()):
if length < min_length or length > max_length:
raise EncodingError(
"Sequence '%s' (length %d) unsupported. There are %d "
"total peptides with this length." % (
sub_df.iloc[0].peptide,
length,
len(sub_df)), supported_peptide_lengths=(
min_length, max_length))
# Array of shape (num peptides, length) giving fixed-length
# amino acid encoding each peptide of the current length.
fixed_length_sequences = numpy.stack(sub_df.peptide.map(
lambda s: numpy.array([
get_amino_acid_index(char) for char in s
])).values)
# Set left edge
result[sub_df.index, :length] = fixed_length_sequences
# Set right edge.
result[sub_df.index, -length:] = fixed_length_sequences
elif alignment_method == "left_pad_centered_right_pad":
if trim:
raise NotImplementedError("trim not supported")
# We arbitrarily set a minimum length of 5, although this encoding
# could handle smaller peptides.
min_length = 5
# Result array is int32, filled with X (null amino acid) value.
result = numpy.full(
fill_value=amino_acid.AMINO_ACID_INDEX['X'],
shape=(len(sequences), max_length * 3),
dtype="int32")
df = pandas.DataFrame({"peptide": sequences}, dtype=numpy.object_)
# For efficiency we handle each supported peptide length using bulk
# array operations.
for (length, sub_df) in df.groupby(df.peptide.str.len()):
if length < min_length or length > max_length:
raise EncodingError(
"Sequence '%s' (length %d) unsupported. There are %d "
"total peptides with this length." % (
sub_df.iloc[0].peptide,
length,
len(sub_df)), supported_peptide_lengths=(
min_length, max_length))
# Array of shape (num peptides, length) giving fixed-length
# amino acid encoding each peptide of the current length.
fixed_length_sequences = numpy.stack(sub_df.peptide.map(
lambda s: numpy.array([
get_amino_acid_index(char) for char in s
])).values)
# Set left edge
result[sub_df.index, :length] = fixed_length_sequences
# Set right edge.
result[sub_df.index, -length:] = fixed_length_sequences
# Set center.
center_left_padding = int(
math.floor((max_length - length) / 2))
center_left_offset = max_length + center_left_padding
result[
sub_df.index,
center_left_offset : center_left_offset + length
] = fixed_length_sequences
elif alignment_method in ("right_pad", "left_pad"):
min_length = 1
# Result array is int32, filled with X (null amino acid) value.
result = numpy.full(
fill_value=amino_acid.AMINO_ACID_INDEX['X'],
shape=(len(sequences), max_length),
dtype="int32")
df = pandas.DataFrame({"peptide": sequences}, dtype=numpy.object_)
# For efficiency we handle each supported peptide length using bulk
# array operations.
for (length, sub_df) in df.groupby(df.peptide.str.len()):
if length < min_length or (not trim and length > max_length):
raise EncodingError(
"Sequence '%s' (length %d) unsupported. There are %d "
"total peptides with this length." % (
sub_df.iloc[0].peptide,
length,
len(sub_df)), supported_peptide_lengths=(
min_length, max_length))
peptides = sub_df.peptide
if length > max_length:
# Trim.
if alignment_method == "right_pad":
peptides = peptides.str.slice(0, max_length)
else:
peptides = peptides.str.slice(length - max_length)
# Array of shape (num peptides, length) giving fixed-length
# amino acid encoding each peptide of the current length.
fixed_length_sequences = numpy.stack(peptides.map(
lambda s: numpy.array([
get_amino_acid_index(char) for char in s
])).values)
if alignment_method == "right_pad":
# Left align (i.e. pad right): set left edge
result[sub_df.index, :length] = fixed_length_sequences
else:
# Right align: set right edge.
result[sub_df.index, -length:] = fixed_length_sequences
else:
raise NotImplementedError(
"Unsupported alignment method: %s" % alignment_method)
return result
| apache-2.0 |
aejax/KerasRL | simple.py | 1 | 4512 | import numpy as np
import matplotlib.pyplot as plt
import timeit
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation
from keras.optimizers import sgd
import gym
class SimpleAgent(object):
def __init__(self, S, A, gamma=0.9, epsilon=0.1, learning_rate=1e-3):
self.S = S
self.A = A
#self.hidden = hidden
self.gamma = gamma
self.epsilon = epsilon
self.learning_rate = learning_rate
self.input_dim = self.get_space_dim(S)
self.output_dim = self.get_space_dim(A)
model = Sequential()
model.add(Dense(self.output_dim, input_dim=self.input_dim, name='Layer1'))
model.compile(loss='mse', optimizer=sgd(lr=learning_rate))
self.model = model
self.state = np.zeros((1, self.input_dim))
self.action = 0
def observe(self, s_next, r, done):
s_next = s_next[np.newaxis,:] # turn vector into matrix
targets = np.zeros((1,self.output_dim))
inputs = self.state
targets = self.model.predict(self.state)
if done:
targets[0,self.action] = r
else:
maxQ = self.model.predict(s_next).max()
targets[0,self.action] = r + self.gamma*maxQ
loss = self.model.train_on_batch(inputs, targets)
self.state = s_next
return loss
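    # Note: observe() above implements one-step Q-learning. The network output
    # is regressed toward the TD target r (terminal step) or
    # r + gamma * max_a' Q(s', a') (non-terminal step), and only the entry for
    # the action actually taken is changed.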
def act(self):
rand = np.random.uniform()
if rand > self.epsilon:
self.action = self.model.predict(self.state).argmax()
else:
self.action = np.random.choice(self.output_dim)
return self.action
@staticmethod
def get_space_dim(Space):
# get the dimensions of the spaces
if type(Space) == gym.spaces.Box:
s_dim = 1
for n in Space.shape:
s_dim *= n
elif type(Space) == gym.spaces.Discrete:
s_dim = Space.n
else:
            print 'Wrong type for Space: must be either Box or Discrete'
return s_dim
def save(self, filename):
self.model.save(filename)
def load(self, filename):
self.model = load_model(filename)
def run(env, agent, n_episode, tMax, plot=True):
returns = []
losses = []
start_time = timeit.default_timer()
for episode in xrange(n_episode):
observation = env.reset()
l_sum = agent.observe(observation, 0, False )
r_sum = 0
for t in xrange(tMax):
#env.render(mode='human')
action = agent.act()
observation, reward, done, info = env.step(action)
l_sum += agent.observe(observation, reward, done)
r_sum += reward
if done:
break
if (episode+1)%10 == 0:
print 'Episode {} finished with return of {}.'.format(episode+1,r_sum)
returns.append(r_sum)
losses.append(l_sum)
end_time = timeit.default_timer()
ave_r = reduce(lambda x, y: x+y, returns) / n_episode
print 'Learning Rate: {:.2}'.format(agent.learning_rate)
print "Training time: {}".format(end_time - start_time)
print 'Average Reward: {}'.format(ave_r)
if plot:
plt.figure()
plt.subplot(121)
plt.plot(returns)
plt.title('Rewards')
plt.subplot(122)
plt.plot(losses)
plt.title('Loss')
plt.savefig('lr={:.2}.png'.format(agent.learning_rate), format='png')
plt.close('all')
return ave_r
def test():
n_episode = 100000
tMax = 200
gamma = 0.9
epsilon = 0.1
learning_rate = 1e-2
env = gym.make('CartPole-v0')
S = env.observation_space
A = env.action_space
agent = SimpleAgent(S, A, gamma=gamma, epsilon=epsilon, learning_rate=learning_rate)
#agent.model = load_model('model.h5')
ave_r = run(env, agent, n_episode, tMax, plot=True)
agent.model.save('model.h5')
exit()
# Sample learning rates #
lrs = 10**np.random.uniform(-2.0, -1.0, size=10)
learning_rates = [lrs[n] for n in xrange(lrs.shape[0]) ]
ave_returns = []
for lr in learning_rates:
agent = SimpleAgent(S, A, gamma=gamma, epsilon=epsilon, learning_rate=lr)
ave_r = run(env, agent, n_episode, tMax, plot=True)
ave_returns.append(ave_r)
plt.figure()
plt.semilogx(learning_rates, ave_returns, 'o')
plt.savefig('ave_returns.png'.format(), format='png')
if __name__ == '__main__':
test()
| gpl-3.0 |
CopyChat/Plotting | Python/climate_change/global_changes_pr_rcp85.py | 1 | 7782 | #!/usr/bin/env python
########################################
# Global map for tests
# from Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import math
import numpy as np
import pylab as pl
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
from mpl_toolkits.basemap import Basemap , addcyclic
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
########################## for CMIP5 characters
DIR='/Users/tang/climate/CMIP5/monthly/pr/'
VARIABLE='pr'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
AbsTemp=273.15
#AbsTemp=0
RefTemp=5
CRUmean=8.148 #1900-2100 land
TargetModel=[\
#'CanESM2',\
#'BCC-CSM1.1',\
#'CCSM4',\
#'CNRM-CM5',\
#'CSIRO-Mk3.6.0',\
#'EC-EARTH',\
#'GFDL-ESM2G',\
#'GFDL-ESM2M',\
#'GISS-E2-H',\
'GISS-E2-R',\
#'HadGEM2-CC',\
'HadGEM2-ES',\
#'INM-CM4',\
#'IPSL-CM5A-LR',\
#'IPSL-CM5A-MR',\
#'MIROC-ESM-CHEM',\
'MIROC-ESM',\
#'MIROC5',\
#'MPI-ESM-LR',\
#'MRI-CGCM3',\
#'NorESM1-M',\
#'MPI-ESM-LR',\
]
COLORtar=['darkred','black','deeppink','orange',\
'orangered','yellow','gold','brown','chocolate',\
'green','yellowgreen','aqua','olive','teal',\
'blue','purple','darkmagenta','fuchsia','indigo',\
'dimgray','black','navy']
COLORall=['darkred','darkblue','darkgreen','deeppink',\
'red','blue','green','pink','gold',\
'lime','lightcyan','orchid','yellow','lightsalmon',\
'brown','khaki','aquamarine','yellowgreen','blueviolet',\
'snow','skyblue','slateblue','orangered','dimgray',\
'chocolate','teal','mediumvioletred','gray','cadetblue',\
'mediumorchid','bisque','tomato','hotpink','firebrick',\
'Chartreuse','purple','goldenrod',\
'black','orangered','cyan','magenta']
linestyles=['_', '_', '_', '-', '-',\
'-', '--','--','--', '--',\
'_', '_','_','_',\
'_', '_','_','_',\
'_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':']
#================================================ CMIP5 models
# for rcp8.5
# ls -l | awk '{printf "999%s998,\\\n",$NF}' | sort -n
modelist2=[\
'ACCESS1-0',\
'ACCESS1-3',\
'BNU-ESM',\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CMCC-CM',\
'CMCC-CMS',\
'CNRM-CM5',\
'CSIRO-Mk3-6-0',\
'CanESM2',\
'EC-EARTH',\
'FIO-ESM',\
'GFDL-CM3',\
'GFDL-ESM2G',\
'GFDL-ESM2M',\
'GISS-E2-R',\
'HadGEM2-AO',\
'HadGEM2-CC',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'IPSL-CM5B-LR',\
'MIROC-ESM-CHEM',\
'MIROC-ESM',\
'MIROC5',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'MRI-CGCM3',\
'NorESM1-M',\
'NorESM1-ME',\
'bcc-csm1-1-m',\
'bcc-csm1-1',\
'inmcm4',\
]
print "==============================================="
#=================================================== define the Plot:
fig1=plt.figure(figsize=(16,9))
ax = fig1.add_subplot(111)
plt.xlabel('Year',fontsize=16)
#plt.ylabel('Global Surface Downwelling Solar Radiation Change (W/m2)',fontsize=16)
plt.ylabel('Global Precipitation Changes (mm/day)',fontsize=16)
plt.title("Global Precipitation Changes simulated by CMIP5 models",fontsize=18)
#plt.title('Global Surface Downwelling Solar Radiation Changes simulated by CMIP5 models (W/m2)',fontsize=18)
plt.ylim(-0.1,0.4)
plt.xlim(1961,2099)
plt.grid()
plt.xticks(np.arange(1961, 2093+10, 20))
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
# vertical at 2005
plt.axvline(x=2005.5,linewidth=2, color='gray')
plt.axhline(y=0,linewidth=2, color='gray')
#plt.plot(x,y,color="blue",linewidth=4)
########################## for historical
print "========== for rcp85 ==============="
EXPERIMENT='historical-rcp85'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
TIME='196101-209912'
filetag="globalmean"
YEAR=range(1960,2099)
Nmonth=1668
SumTemp=np.zeros(Nmonth/12)
K=0
for Model in modelist2:
#define the K-th model input file:
K=K+1 # for average
infile1=DIR+'rcp8.5'+'/'+Model+'/'\
+VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_'+ENSEMBLE+'_'+TIME+'.'+filetag+'.nc'
#an example: tas_Amon_CanESM2_historical-rcp85_r1i1p1_200601-210012.globalmean.nc & \
#this file was copied locally for tests in this book
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
    # read the precipitation variable (kept in an array named TAS for historical reasons)
    TAS=infile.variables[VARIABLE][:,:,:].copy()
    print 'the variable pr ===============: '
print TAS
# to change units to mm/day
TAS=TAS*86400
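    # (CMIP5 'pr' is a flux in kg m-2 s-1; 1 kg m-2 of water is 1 mm, so
    # multiplying by 86400 s/day converts the values to mm/day.)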
# calculate the annual mean temp:
TEMP=range(0,Nmonth,12)
for j in range(0,Nmonth,12):
TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
print TEMP
# reference temp: mean of 1996-2005
RefTemp=np.mean(TEMP[len(TEMP)-94-10+1:len(TEMP)-94])
if K==1:
ArrRefTemp=[RefTemp]
else:
ArrRefTemp=ArrRefTemp+[RefTemp]
print 'ArrRefTemp ========== ',ArrRefTemp
TEMP=[t-RefTemp for t in TEMP]
print " temp ======================== relative to mean of 1986-2005"
print TEMP
##quit()
# for std
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
#quit()
#plot only target models
if Model in TargetModel:
plt.plot(YEAR,TEMP,\
label=Model,\
#linestyles[TargetModel.index(Model)],\
color=COLORtar[TargetModel.index(Model)],\
linewidth=2)
#if Model=='CanESM2':
#plt.plot(YEAR,TEMP,color="red",linewidth=1)
#if Model=='MPI-ESM-LR':
#plt.plot(YEAR,TEMP,color="blue",linewidth=1)
#if Model=='MPI-ESM-MR':
#plt.plot(YEAR,TEMP,color="green",linewidth=1)
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print modelist2
plt.plot(YEAR,AveTemp,label='ensemble mean',color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
# draw NO. of model used:
plt.text(2015,2,str(K)+' models',size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
)
print "==============================================="
plt.legend(loc=2)
plt.show()
quit()
| gpl-3.0 |
google/syzygy | third_party/numpy/files/numpy/core/function_base.py | 82 | 5474 | __all__ = ['logspace', 'linspace']
import numeric as _nx
from numeric import array
def linspace(start, stop, num=50, endpoint=True, retstep=False):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop` ].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float (only if `retstep` is True)
Size of spacing between samples.
See Also
--------
    arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num <= 0:
return array([], float)
if endpoint:
if num == 1:
return array([float(start)])
step = (stop-start)/float((num-1))
y = _nx.arange(0, num) * step + start
y[-1] = stop
else:
step = (stop-start)/float(num)
y = _nx.arange(0, num) * step + start
if retstep:
return y, step
else:
return y
def logspace(start,stop,num=50,endpoint=True,base=10.0):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
    arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start,stop,num=num,endpoint=endpoint)
return _nx.power(base,y)
| apache-2.0 |
Unidata/MetPy | v0.8/_downloads/Find_Natural_Neighbors_Verification.py | 3 | 2729 | # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Find Natural Neighbors Verification
===================================
Finding natural neighbors in a triangulation
A triangle is a natural neighbor of a point if that point lies within the
circumscribed circle of the triangle, i.e. within one circumradius of the
triangle's circumcenter.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Delaunay
from metpy.gridding.triangles import find_natural_neighbors
# Create test observations, test points, and plot the triangulation and points.
gx, gy = np.meshgrid(np.arange(0, 20, 4), np.arange(0, 20, 4))
pts = np.vstack([gx.ravel(), gy.ravel()]).T
tri = Delaunay(pts)
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
test_points = np.array([[2, 2], [5, 10], [12, 13.4], [12, 8], [20, 20]])
for i, (x, y) in enumerate(test_points):
ax.plot(x, y, 'k.', markersize=6)
ax.annotate('test ' + str(i), xy=(x, y))
###########################################
# Since finding natural neighbors already calculates circumcenters and circumradii, return
# that information for later use.
#
# The key of the neighbors dictionary refers to the test point index, and the list of
# integers gives the triangles that are natural neighbors of that particular test point.
#
# Since point 4 is far away from the triangulation, it has no natural neighbors.
# Point 3 is at the confluence of several triangles so it has many natural neighbors.
neighbors, tri_info = find_natural_neighbors(tri, test_points)
print(neighbors)
###########################################
# We can then use the information in tri_info later.
#
# The dictionary key is the index of a particular triangle in the Delaunay triangulation data
# structure. 'cc' is that triangle's circumcenter, and 'r' is the radius of the circumcircle
# containing that triangle.
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
# Using circumcenter and radius information from tri_info, plot circumcircles and
# circumcenters for each triangle.
for _idx, item in tri_info.items():
ax.plot(item['cc'][0], item['cc'][1], 'k.', markersize=5)
circ = plt.Circle(item['cc'], item['r'], edgecolor='k', facecolor='none',
transform=fig.axes[0].transData)
ax.add_artist(circ)
ax.set_aspect('equal', 'datalim')
plt.show()
| bsd-3-clause |
oscarmore2/deep-learning-study | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
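# Note on load_cfar10_batch: each CIFAR-10 image is stored as a flat 3072-value
# row in channel-major order, so it is reshaped to (N, 3, 32, 32) and then
# transposed to channels-last (N, 32, 32, 3) for plotting and training.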
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
    Display stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
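# Minimal usage sketch (comment only; assumes preprocess_and_save_data() has
# already written the 'preprocess_batch_*.p' files, and the batch id/size
# values here are arbitrary):
#
#   for features, labels in load_preprocess_training_batch(1, 64):
#       pass  # feed the mini-batch to a training step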
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
ARudiuk/mne-python | examples/decoding/plot_decoding_xdawn_eeg.py | 4 | 3406 | """
=============================
XDAWN Decoding From EEG data
=============================
ERP decoding with Xdawn. For each event type, a set of spatial Xdawn filters
is trained and applied to the signal. Channels are concatenated and rescaled
to create feature vectors that will be fed into a Logistic Regression.
References
----------
[1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
algorithm to enhance evoked potentials: application to brain-computer
interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
[2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
August). Theoretical analysis of xDAWN algorithm: application to an
efficient sensor selection in a P300 BCI. In Signal Processing Conference,
2011 19th European (pp. 1382-1386). IEEE.
"""
# Authors: Alexandre Barachant <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_validation import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from mne import io, pick_types, read_events, Epochs
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.decoding import EpochsVectorizer
from mne.viz import tight_layout
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, method='iir')
events = read_events(event_fname)
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
add_eeg_ref=False, verbose=False)
# Create classification pipeline
clf = make_pipeline(Xdawn(n_components=3),
EpochsVectorizer(),
MinMaxScaler(),
LogisticRegression(penalty='l1'))
# Get the labels
labels = epochs.events[:, -1]
# Cross validator
cv = StratifiedKFold(y=labels, n_folds=10, shuffle=True, random_state=42)
# Do cross-validation
preds = np.empty(len(labels))
for train, test in cv:
clf.fit(epochs[train], labels[train])
preds[test] = clf.predict(epochs[test])
# Classification report
target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r']
report = classification_report(labels, preds, target_names=target_names)
print(report)
# Normalized confusion matrix
cm = confusion_matrix(labels, preds)
cm_normalized = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis]
# Plot confusion matrix
plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Normalized Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
| bsd-3-clause |
matpalm/cartpoleplusplus | event_log.py | 1 | 6521 | #!/usr/bin/env python
import event_pb2
import gzip
import matplotlib.pyplot as plt
import numpy as np
import StringIO
import struct
def rgb_to_png(rgb):
"""convert RGB data from render to png"""
sio = StringIO.StringIO()
plt.imsave(sio, rgb)
return sio.getvalue()
def png_to_rgb(png_bytes):
"""convert png (from rgb_to_png) to RGB"""
# note PNG is always RGBA so we need to slice off A
rgba = plt.imread(StringIO.StringIO(png_bytes))
return rgba[:,:,:3]
def read_state_from_event(event):
"""unpack state from event (i.e. inverse of add_state_to_event)"""
if len(event.state[0].render) > 0:
num_repeats = len(event.state)
num_cameras = len(event.state[0].render)
eg_render = event.state[0].render[0]
state = np.empty((eg_render.height, eg_render.width, 3,
num_cameras, num_repeats))
for r_idx in range(num_repeats):
repeat = event.state[r_idx]
for c_idx in range(num_cameras):
png_bytes = repeat.render[c_idx].png_bytes
state[:,:,:,c_idx,r_idx] = png_to_rgb(png_bytes)
else:
state = np.empty((len(event.state), 2, 7))
for i, s in enumerate(event.state):
state[i][0] = s.cart_pose
state[i][1] = s.pole_pose
return state
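# read_state_from_event returns either a (height, width, 3, num_cameras,
# num_repeats) RGB array for raw-pixel logs, or a (num_repeats, 2, 7) array of
# cart/pole poses, mirroring how EventLog.add_state_to_event packs the state.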
class EventLog(object):
def __init__(self, path, use_raw_pixels):
self.log_file = open(path, "ab")
self.episode_entry = None
self.use_raw_pixels = use_raw_pixels
def reset(self):
if self.episode_entry is not None:
# *sigh* have to frame these ourselves :/
      # (as long as a header-len will do...)
buff = self.episode_entry.SerializeToString()
if len(buff) > 0:
buff_len = struct.pack('=l', len(buff))
self.log_file.write(buff_len)
self.log_file.write(buff)
self.log_file.flush()
self.episode_entry = event_pb2.Episode()
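  # On-disk framing written above: each episode is a 4-byte little-endian
  # length ('=l') followed by the serialized Episode protobuf; EventLogReader
  # below reads the stream back with the same convention.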
def add_state_to_event(self, state, event):
"""pack state into event"""
if self.use_raw_pixels:
# TODO: be nice to have pose info here too in the pixel case...
num_repeats = state.shape[4]
for r_idx in range(num_repeats):
s = event.state.add()
num_cameras = state.shape[3]
for c_idx in range(num_cameras):
render = s.render.add()
render.width = state.shape[1]
render.height = state.shape[0]
render.png_bytes = rgb_to_png(state[:,:,:,c_idx,r_idx])
else:
num_repeats = state.shape[0]
for r in range(num_repeats):
s = event.state.add()
s.cart_pose.extend(map(float, state[r][0]))
s.pole_pose.extend(map(float, state[r][1]))
def add(self, state, action, reward):
event = self.episode_entry.event.add()
self.add_state_to_event(state, event)
if isinstance(action, int):
event.action.append(action) # single action
else:
assert action.shape[0] == 1 # never log batch operations
event.action.extend(map(float, action[0]))
event.reward = reward
def add_just_state(self, state):
event = self.episode_entry.event.add()
self.add_state_to_event(state, event)
class EventLogReader(object):
def __init__(self, path):
if path.endswith(".gz"):
self.log_file = gzip.open(path, "rb")
else:
self.log_file = open(path, "rb")
def entries(self):
episode = event_pb2.Episode()
while True:
buff_len_bytes = self.log_file.read(4)
if len(buff_len_bytes) == 0: return
buff_len = struct.unpack('=l', buff_len_bytes)[0]
buff = self.log_file.read(buff_len)
episode.ParseFromString(buff)
yield episode
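# Minimal read-side sketch (comment only; the log path is hypothetical and
# mirrors what the __main__ block below does):
#
#   for episode in EventLogReader("events.log").entries():
#       for event in episode.event:
#           state = read_state_from_event(event)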
def make_dir(d):
if not os.path.exists(d):
os.makedirs(d)
if __name__ == "__main__":
import argparse, os, sys, Image, ImageDraw
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--log-file', type=str, default=None)
parser.add_argument('--echo', action='store_true', help="write event to stdout")
parser.add_argument('--episodes', type=str, default=None,
help="if set only process these specific episodes (comma separated list)")
parser.add_argument('--img-output-dir', type=str, default=None,
help="if set output all renders to this DIR/e_NUM/s_NUM.png")
parser.add_argument('--img-debug-overlay', action='store_true',
help="if set overlay image with debug info")
# TODO args for episode range
opts = parser.parse_args()
episode_whitelist = None
if opts.episodes is not None:
episode_whitelist = set(map(int, opts.episodes.split(",")))
if opts.img_output_dir is not None:
make_dir(opts.img_output_dir)
total_num_read_episodes = 0
total_num_read_events = 0
elr = EventLogReader(opts.log_file)
for episode_id, episode in enumerate(elr.entries()):
if episode_whitelist is not None and episode_id not in episode_whitelist:
continue
if opts.echo:
print "-----", episode_id
print episode
total_num_read_episodes += 1
total_num_read_events += len(episode.event)
if opts.img_output_dir is not None:
dir = "%s/ep_%05d" % (opts.img_output_dir, episode_id)
make_dir(dir)
make_dir(dir + "/c0") # HACK: assume only max two cameras
make_dir(dir + "/c1")
for event_id, event in enumerate(episode.event):
for state_id, state in enumerate(event.state):
for camera_id, render in enumerate(state.render):
assert camera_id in [0, 1], "fix hack above"
# open RGB png in an image canvas
img = Image.open(StringIO.StringIO(render.png_bytes))
if opts.img_debug_overlay:
canvas = ImageDraw.Draw(img)
# draw episode and event number in top left
canvas.text((0, 0), "%d %d" % (episode_id, event_id), fill="black")
# draw simple fx/fy representation in bottom right...
# a bounding box
bx, by, bw = 40, 40, 10
canvas.line((bx-bw,by-bw, bx+bw,by-bw, bx+bw,by+bw, bx-bw,by+bw, bx-bw,by-bw), fill="black")
# then a simple fx/fy line
fx, fy = event.action[0], event.action[1]
canvas.line((bx,by, bx+(fx*bw), by+(fy*bw)), fill="black")
# write it out
img = img.resize((200, 200))
filename = "%s/c%d/e%05d_r%d.png" % (dir, camera_id, event_id, state_id)
img.save(filename)
print >>sys.stderr, "read", total_num_read_episodes, "episodes for a total of", total_num_read_events, "events"
| mit |
hsuantien/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/tests/test_wb.py | 2 | 4757 | # flake8: noqa
import nose
import pandas
from pandas.compat import u
from pandas.util.testing import network
from pandas.util.testing import assert_frame_equal
from numpy.testing.decorators import slow
import pandas.util.testing as tm
# deprecated
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
from pandas.io.wb import search, download, get_countries
class TestWB(tm.TestCase):
@slow
@network
def test_wdi_search(self):
# Test that a name column exists, and that some results were returned
# ...without being too strict about what the actual contents of the
        # results actually are. The fact that there are some is good enough.
result = search('gdp.*capita.*constant')
self.assertTrue(result.name.str.contains('GDP').any())
@slow
@network
def test_wdi_download(self):
# Test a bad indicator with double (US), triple (USA),
# standard (CA, MX), non standard (KSV),
# duplicated (US, US, USA), and unknown (BLA) country codes
        # ...but NOT a crash-inducing country code (World Bank strips pandas
        # users of the luxury of laziness, because they create their
        # own exceptions, and don't clean up legacy country codes.)
# ...but NOT a retired indicator (User should want it to error.)
cntry_codes = ['CA', 'MX', 'USA', 'US', 'US', 'KSV', 'BLA']
inds = ['NY.GDP.PCAP.CD','BAD.INDICATOR']
expected = {'NY.GDP.PCAP.CD': {('Canada', '2003'): 28026.006013044702, ('Mexico', '2003'): 6601.0420648056606, ('Canada', '2004'): 31829.522562759001, ('Kosovo', '2003'): 1969.56271307405, ('Mexico', '2004'): 7042.0247834044303, ('United States', '2004'): 41928.886136479705, ('United States', '2003'): 39682.472247320402, ('Kosovo', '2004'): 2135.3328465238301}}
expected = pandas.DataFrame(expected)
#Round, to ignore revisions to data.
expected = pandas.np.round(expected,decimals=-3)
expected.sort(inplace=True)
result = download(country=cntry_codes, indicator=inds,
start=2003, end=2004, errors='ignore')
result.sort(inplace=True)
#Round, to ignore revisions to data.
result = pandas.np.round(result,decimals=-3)
expected.index = result.index
assert_frame_equal(result, pandas.DataFrame(expected))
@slow
@network
def test_wdi_download_w_retired_indicator(self):
cntry_codes = ['CA', 'MX', 'US']
# Despite showing up in the search feature, and being listed online,
# the api calls to GDPPCKD don't work in their own query builder, nor
# pandas module. GDPPCKD used to be a common symbol.
# This test is written to ensure that error messages to pandas users
# continue to make sense, rather than a user getting some missing
# key error, cause their JSON message format changed. If
# World bank ever finishes the deprecation of this symbol,
# this nose test should still pass.
inds = ['GDPPCKD']
try:
result = download(country=cntry_codes, indicator=inds,
start=2003, end=2004, errors='ignore')
# If for some reason result actually ever has data, it's cause WB
# fixed the issue with this ticker. Find another bad one.
except ValueError as e:
raise nose.SkipTest("No indicators returned data: {0}".format(e))
# if it ever gets here, it means WB unretired the indicator.
# even if they dropped it completely, it would still get caught above
# or the WB API changed somehow in a really unexpected way.
if len(result) > 0:
raise nose.SkipTest("Invalid results")
@slow
@network
def test_wdi_download_w_crash_inducing_countrycode(self):
cntry_codes = ['CA', 'MX', 'US', 'XXX']
inds = ['NY.GDP.PCAP.CD']
try:
result = download(country=cntry_codes, indicator=inds,
start=2003, end=2004, errors='ignore')
except ValueError as e:
raise nose.SkipTest("No indicators returned data: {0}".format(e))
# if it ever gets here, it means the country code XXX got used by WB
# or the WB API changed somehow in a really unexpected way.
if len(result) > 0:
raise nose.SkipTest("Invalid results")
@slow
@network
def test_wdi_get_countries(self):
result = get_countries()
self.assertTrue('Zimbabwe' in list(result['name']))
self.assertTrue(len(result) > 100)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
theislab/scanpy | scanpy/tests/test_highly_variable_genes.py | 1 | 8483 | import pytest
import pandas as pd
import numpy as np
import scanpy as sc
from pathlib import Path
FILE = Path(__file__).parent / Path('_scripts/seurat_hvg.csv')
FILE_V3 = Path(__file__).parent / Path('_scripts/seurat_hvg_v3.csv.gz')
FILE_V3_BATCH = Path(__file__).parent / Path('_scripts/seurat_hvg_v3_batch.csv')
def test_highly_variable_genes_basic():
adata = sc.datasets.blobs()
sc.pp.highly_variable_genes(adata)
adata = sc.datasets.blobs()
np.random.seed(0)
adata.obs['batch'] = np.random.binomial(3, 0.5, size=(adata.n_obs))
adata.obs['batch'] = adata.obs['batch'].astype('category')
sc.pp.highly_variable_genes(adata, batch_key='batch')
assert 'highly_variable_nbatches' in adata.var.columns
assert 'highly_variable_intersection' in adata.var.columns
adata = sc.datasets.blobs()
adata.obs['batch'] = np.random.binomial(4, 0.5, size=(adata.n_obs))
adata.obs['batch'] = adata.obs['batch'].astype('category')
sc.pp.highly_variable_genes(adata, batch_key='batch', n_top_genes=3)
assert 'highly_variable_nbatches' in adata.var.columns
assert adata.var['highly_variable'].sum() == 3
sc.pp.highly_variable_genes(adata)
no_batch_hvg = adata.var.highly_variable.copy()
assert no_batch_hvg.any()
adata.obs['batch'] = 'batch'
adata.obs['batch'] = adata.obs['batch'].astype('category')
sc.pp.highly_variable_genes(adata, batch_key='batch')
assert np.all(no_batch_hvg == adata.var.highly_variable)
assert np.all(adata.var.highly_variable_intersection == adata.var.highly_variable)
adata.obs["batch"] = "a"
adata.obs.batch.loc[::2] = "b"
sc.pp.highly_variable_genes(adata, batch_key="batch")
assert adata.var["highly_variable"].any()
colnames = [
'means',
'dispersions',
'dispersions_norm',
'highly_variable_nbatches',
'highly_variable_intersection',
'highly_variable',
]
hvg_df = sc.pp.highly_variable_genes(adata, batch_key="batch", inplace=False)
assert np.all(np.isin(colnames, hvg_df.columns))
def test_higly_variable_genes_compare_to_seurat():
seurat_hvg_info = pd.read_csv(FILE, sep=' ')
pbmc = sc.datasets.pbmc68k_reduced()
pbmc.X = pbmc.raw.X
pbmc.var_names_make_unique()
sc.pp.normalize_per_cell(pbmc, counts_per_cell_after=1e4)
sc.pp.log1p(pbmc)
sc.pp.highly_variable_genes(
pbmc, flavor='seurat', min_mean=0.0125, max_mean=3, min_disp=0.5, inplace=True
)
np.testing.assert_array_equal(
seurat_hvg_info['highly_variable'], pbmc.var['highly_variable']
)
# (still) Not equal to tolerance rtol=2e-05, atol=2e-05
# np.testing.assert_allclose(4, 3.9999, rtol=2e-05, atol=2e-05)
np.testing.assert_allclose(
seurat_hvg_info['means'],
pbmc.var['means'],
rtol=2e-05,
atol=2e-05,
)
np.testing.assert_allclose(
seurat_hvg_info['dispersions'],
pbmc.var['dispersions'],
rtol=2e-05,
atol=2e-05,
)
np.testing.assert_allclose(
seurat_hvg_info['dispersions_norm'],
pbmc.var['dispersions_norm'],
rtol=2e-05,
atol=2e-05,
)
def test_higly_variable_genes_compare_to_seurat_v3():
seurat_hvg_info = pd.read_csv(
FILE_V3, sep=' ', dtype={"variances_norm": np.float64}
)
pbmc = sc.datasets.pbmc3k()
pbmc.var_names_make_unique()
pbmc_dense = pbmc.copy()
pbmc_dense.X = pbmc_dense.X.toarray()
sc.pp.highly_variable_genes(pbmc, n_top_genes=1000, flavor='seurat_v3')
sc.pp.highly_variable_genes(pbmc_dense, n_top_genes=1000, flavor='seurat_v3')
np.testing.assert_array_equal(
seurat_hvg_info['highly_variable'], pbmc.var['highly_variable']
)
np.testing.assert_allclose(
seurat_hvg_info['variances'],
pbmc.var['variances'],
rtol=2e-05,
atol=2e-05,
)
np.testing.assert_allclose(
seurat_hvg_info['variances_norm'],
pbmc.var['variances_norm'],
rtol=2e-05,
atol=2e-05,
)
np.testing.assert_allclose(
pbmc_dense.var['variances_norm'],
pbmc.var['variances_norm'],
rtol=2e-05,
atol=2e-05,
)
batch = np.zeros((len(pbmc)), dtype=int)
batch[1500:] = 1
pbmc.obs["batch"] = batch
df = sc.pp.highly_variable_genes(
pbmc, n_top_genes=4000, flavor='seurat_v3', batch_key="batch", inplace=False
)
df.sort_values(
["highly_variable_nbatches", "highly_variable_rank"],
ascending=[False, True],
na_position="last",
inplace=True,
)
df = df.iloc[:4000]
seurat_hvg_info_batch = pd.read_csv(
FILE_V3_BATCH, sep=' ', dtype={"variances_norm": np.float64}
)
# ranks might be slightly different due to many genes having same normalized var
seu = pd.Index(seurat_hvg_info_batch['x'].values)
assert len(seu.intersection(df.index)) / 4000 > 0.95
sc.pp.log1p(pbmc)
with pytest.warns(
UserWarning,
match="`flavor='seurat_v3'` expects raw count data, but non-integers were found.",
):
sc.pp.highly_variable_genes(pbmc, n_top_genes=1000, flavor='seurat_v3')
def test_filter_genes_dispersion_compare_to_seurat():
seurat_hvg_info = pd.read_csv(FILE, sep=' ')
pbmc = sc.datasets.pbmc68k_reduced()
pbmc.X = pbmc.raw.X
pbmc.var_names_make_unique()
sc.pp.normalize_per_cell(pbmc, counts_per_cell_after=1e4)
sc.pp.filter_genes_dispersion(
pbmc,
flavor='seurat',
log=True,
subset=False,
min_mean=0.0125,
max_mean=3,
min_disp=0.5,
)
np.testing.assert_array_equal(
seurat_hvg_info['highly_variable'], pbmc.var['highly_variable']
)
# (still) Not equal to tolerance rtol=2e-05, atol=2e-05:
# np.testing.assert_allclose(4, 3.9999, rtol=2e-05, atol=2e-05)
np.testing.assert_allclose(
seurat_hvg_info['means'],
pbmc.var['means'],
rtol=2e-05,
atol=2e-05,
)
np.testing.assert_allclose(
seurat_hvg_info['dispersions'],
pbmc.var['dispersions'],
rtol=2e-05,
atol=2e-05,
)
np.testing.assert_allclose(
seurat_hvg_info['dispersions_norm'],
pbmc.var['dispersions_norm'],
rtol=2e-05,
atol=2e-05,
)
def test_highly_variable_genes_batches():
adata = sc.datasets.pbmc68k_reduced()
adata[:100, :100].X = np.zeros((100, 100))
adata.obs['batch'] = ['0' if i < 100 else '1' for i in range(adata.n_obs)]
adata_1 = adata[adata.obs.batch.isin(['0']), :]
adata_2 = adata[adata.obs.batch.isin(['1']), :]
sc.pp.highly_variable_genes(
adata,
batch_key='batch',
flavor='cell_ranger',
n_top_genes=200,
)
sc.pp.filter_genes(adata_1, min_cells=1)
sc.pp.filter_genes(adata_2, min_cells=1)
hvg1 = sc.pp.highly_variable_genes(
adata_1, flavor='cell_ranger', n_top_genes=200, inplace=False
)
hvg2 = sc.pp.highly_variable_genes(
adata_2, flavor='cell_ranger', n_top_genes=200, inplace=False
)
assert np.isclose(
adata.var['dispersions_norm'][100],
0.5 * hvg1['dispersions_norm'][0] + 0.5 * hvg2['dispersions_norm'][100],
)
assert np.isclose(
adata.var['dispersions_norm'][101],
0.5 * hvg1['dispersions_norm'][1] + 0.5 * hvg2['dispersions_norm'][101],
)
assert np.isclose(
adata.var['dispersions_norm'][0], 0.5 * hvg2['dispersions_norm'][0]
)
colnames = [
'means',
'dispersions',
'dispersions_norm',
'highly_variable',
]
assert np.all(np.isin(colnames, hvg1.columns))
from scanpy.preprocessing._utils import _get_mean_var
def test_seurat_v3_mean_var_output_with_batchkey():
pbmc = sc.datasets.pbmc3k()
pbmc.var_names_make_unique()
n_cells = pbmc.shape[0]
batch = np.zeros((n_cells), dtype=int)
batch[1500:] = 1
pbmc.obs["batch"] = batch
# true_mean, true_var = _get_mean_var(pbmc.X)
true_mean = np.mean(pbmc.X.toarray(), axis=0)
true_var = np.var(pbmc.X.toarray(), axis=0, dtype=np.float64, ddof=1)
result_df = sc.pp.highly_variable_genes(
pbmc, batch_key='batch', flavor='seurat_v3', n_top_genes=4000, inplace=False
)
np.testing.assert_allclose(true_mean, result_df['means'], rtol=2e-05, atol=2e-05)
np.testing.assert_allclose(true_var, result_df['variances'], rtol=2e-05, atol=2e-05)
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
toobaz/pandas | pandas/tests/sparse/frame/test_to_from_scipy.py | 2 | 6938 | import numpy as np
import pytest
from pandas.core.dtypes.common import is_bool_dtype
import pandas as pd
from pandas import SparseDataFrame, SparseSeries
from pandas.core.sparse.api import SparseDtype
from pandas.util import testing as tm
scipy = pytest.importorskip("scipy")
ignore_matrix_warning = pytest.mark.filterwarnings(
"ignore:the matrix subclass:PendingDeprecationWarning"
)
@pytest.mark.parametrize("index", [None, list("abc")]) # noqa: F811
@pytest.mark.parametrize("columns", [None, list("def")])
@pytest.mark.parametrize("fill_value", [None, 0, np.nan])
@pytest.mark.parametrize("dtype", [bool, int, float, np.uint16])
@ignore_matrix_warning
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_to_scipy(spmatrix, index, columns, fill_value, dtype):
# GH 4343
# Make one ndarray and from it one sparse matrix, both to be used for
# constructing frames and comparing results
arr = np.eye(3, dtype=dtype)
# GH 16179
arr[0, 1] = dtype(2)
try:
spm = spmatrix(arr)
assert spm.dtype == arr.dtype
except (TypeError, AssertionError):
# If conversion to sparse fails for this spmatrix type and arr.dtype,
# then the combination is not currently supported in NumPy, so we
# can just skip testing it thoroughly
return
sdf = SparseDataFrame(
spm, index=index, columns=columns, default_fill_value=fill_value
)
# Expected result construction is kind of tricky for all
# dtype-fill_value combinations; easiest to cast to something generic
# and except later on
rarr = arr.astype(object)
rarr[arr == 0] = np.nan
expected = SparseDataFrame(rarr, index=index, columns=columns).fillna(
fill_value if fill_value is not None else np.nan
)
# Assert frame is as expected
sdf_obj = sdf.astype(object)
tm.assert_sp_frame_equal(sdf_obj, expected)
tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense())
# Assert spmatrices equal
assert dict(sdf.to_coo().todok()) == dict(spm.todok())
# Ensure dtype is preserved if possible
# XXX: verify this
res_dtype = bool if is_bool_dtype(dtype) else dtype
tm.assert_contains_all(
sdf.dtypes.apply(lambda dtype: dtype.subtype), {np.dtype(res_dtype)}
)
assert sdf.to_coo().dtype == res_dtype
# However, adding a str column results in an upcast to object
sdf["strings"] = np.arange(len(sdf)).astype(str)
assert sdf.to_coo().dtype == np.object_
@pytest.mark.parametrize("fill_value", [None, 0, np.nan]) # noqa: F811
@ignore_matrix_warning
@pytest.mark.filterwarnings("ignore:object dtype is not supp:UserWarning")
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_to_scipy_object(spmatrix, fill_value):
# GH 4343
dtype = object
columns = list("cd")
index = list("ab")
if spmatrix is scipy.sparse.dok_matrix:
pytest.skip("dok_matrix from object does not work in SciPy")
# Make one ndarray and from it one sparse matrix, both to be used for
# constructing frames and comparing results
arr = np.eye(2, dtype=dtype)
try:
spm = spmatrix(arr)
assert spm.dtype == arr.dtype
except (TypeError, AssertionError):
# If conversion to sparse fails for this spmatrix type and arr.dtype,
# then the combination is not currently supported in NumPy, so we
# can just skip testing it thoroughly
return
sdf = SparseDataFrame(
spm, index=index, columns=columns, default_fill_value=fill_value
)
# Expected result construction is kind of tricky for all
# dtype-fill_value combinations; easiest to cast to something generic
# and except later on
rarr = arr.astype(object)
rarr[arr == 0] = np.nan
expected = SparseDataFrame(rarr, index=index, columns=columns).fillna(
fill_value if fill_value is not None else np.nan
)
# Assert frame is as expected
sdf_obj = sdf.astype(SparseDtype(object, fill_value))
tm.assert_sp_frame_equal(sdf_obj, expected)
tm.assert_frame_equal(sdf_obj.to_dense(), expected.to_dense())
# Assert spmatrices equal
assert dict(sdf.to_coo().todok()) == dict(spm.todok())
# Ensure dtype is preserved if possible
res_dtype = object
tm.assert_contains_all(
sdf.dtypes.apply(lambda dtype: dtype.subtype), {np.dtype(res_dtype)}
)
assert sdf.to_coo().dtype == res_dtype
@ignore_matrix_warning
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_scipy_correct_ordering(spmatrix):
# GH 16179
arr = np.arange(1, 5).reshape(2, 2)
try:
spm = spmatrix(arr)
assert spm.dtype == arr.dtype
except (TypeError, AssertionError):
# If conversion to sparse fails for this spmatrix type and arr.dtype,
# then the combination is not currently supported in NumPy, so we
# can just skip testing it thoroughly
return
sdf = SparseDataFrame(spm)
expected = SparseDataFrame(arr)
tm.assert_sp_frame_equal(sdf, expected)
tm.assert_frame_equal(sdf.to_dense(), expected.to_dense())
@ignore_matrix_warning
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_from_scipy_fillna(spmatrix):
# GH 16112
arr = np.eye(3)
arr[1:, 0] = np.nan
try:
spm = spmatrix(arr)
assert spm.dtype == arr.dtype
except (TypeError, AssertionError):
# If conversion to sparse fails for this spmatrix type and arr.dtype,
# then the combination is not currently supported in NumPy, so we
# can just skip testing it thoroughly
return
sdf = SparseDataFrame(spm).fillna(-1.0)
# Returning frame should fill all nan values with -1.0
expected = SparseDataFrame(
{
0: SparseSeries([1.0, -1, -1]),
1: SparseSeries([np.nan, 1, np.nan]),
2: SparseSeries([np.nan, np.nan, 1]),
},
default_fill_value=-1,
)
# fill_value is expected to be what .fillna() above was called with
# We don't use -1 as initial fill_value in expected SparseSeries
# construction because this way we obtain "compressed" SparseArrays,
# avoiding having to construct them ourselves
for col in expected:
expected[col].fill_value = -1
tm.assert_sp_frame_equal(sdf, expected)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
def test_index_names_multiple_nones():
# https://github.com/pandas-dev/pandas/pull/24092
sparse = pytest.importorskip("scipy.sparse")
s = pd.Series(1, index=pd.MultiIndex.from_product([["A", "B"], [0, 1]])).to_sparse()
result, _, _ = s.to_coo()
assert isinstance(result, sparse.coo_matrix)
result = result.toarray()
expected = np.ones((2, 2), dtype="int64")
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
jszopi/repESP | scripts/old/compromise.py | 1 | 8488 | from repESP import resp, resp_helpers, graphs
from repESP.field_comparison import rms_and_rep
from repESP.charges import update_with_charges, _update_molecule_with_charges
from repESP.charges import compare_charges
import os
import matplotlib.pyplot as plt
import math
import shutil
# NOTE: This ad-hoc script has been replaced with the more general field_diff.py
# This was necessary to prevent title from being cut-off when it's shifted up
# due to the second x-axis label.
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
esp_charge_type = 'mk'
# esp_charge_type = 'chelpg'
charge_type = 'nbo'
# charge_type = 'aim'
molecule_name = 'methane'
indicator_label = 1
# molecule_name = 'NMe3H_plus'
# indicator_label = 13
path = '../data/' + molecule_name + '/'
output_path = path + "compromise_{0}_and_{1}/".format(charge_type,
esp_charge_type)
os.mkdir(output_path)
esp_fn = molecule_name + "_" + esp_charge_type + '.esp'
zero_net_charge = True
if "plus" in molecule_name or "minus" in molecule_name:
zero_net_charge = False
print("\nThe molecule was found {0}to be neutral based on its name. You should"
" check if this is correct.".format("" if zero_net_charge else "NOT "))
resp_output_path = output_path + 'resp_calcs/'
min_resp_output_path = output_path + 'min_resp_calcs/'
os.mkdir(resp_output_path)
os.mkdir(min_resp_output_path)
log_fn = path + molecule_name + "_" + charge_type + ".log"
esp_log_fn = path + molecule_name + "_" + esp_charge_type + ".log"
g = resp_helpers.G09_esp(path + esp_fn)
# Both the Gaussian ESP fitting methods and other charge assignment methods may
# not yield equivalent charges. As equivalent charges make more sense for force
# field development, they will be used. The ESP charges are equivalenced by
# performing unrestrained RESP, which will be used as a reference for the fit
# minimum. Charges from the other method will be equivalenced manually by my
# averaging function `resp.equivalence`. They will be scaled to obtain
# different ratio charges. All the charges are calculated and printed at the
# start for reference.
update_with_charges(esp_charge_type, esp_log_fn, g.molecule)
update_with_charges(charge_type, log_fn, g.molecule)
equiv_charges = resp.equivalence(g.molecule, charge_type, path)[0]
_update_molecule_with_charges(g.molecule, equiv_charges, charge_type+'_equiv')
print("\nRunning unrestrained RESP to fit ESP with equivalence:")
esp_equiv_molecule = resp.run_resp(
path, resp_output_path + 'unrest', resp_type='unrest', esp_fn=esp_fn)
charge_rrms = rms_and_rep(g.field, g.molecule, charge_type)[1]
equiv_charge_rrms = rms_and_rep(g.field, g.molecule, charge_type + '_equiv')[1]
esp_charge_rrms = rms_and_rep(g.field, g.molecule, esp_charge_type)[1]
resp_charge_rrms = rms_and_rep(g.field, esp_equiv_molecule, 'resp')[1]
print("\nThe molecule with {0} charges:".format(charge_type.upper()))
print("RRMS: {0:.5f}".format(charge_rrms))
for atom in g.molecule:
atom.print_with_charge(charge_type)
print("\nThe molecule with equivalenced {0} charges:".format(
charge_type.upper()))
print("RRMS: {0:.5f}".format(equiv_charge_rrms))
for atom in g.molecule:
atom.print_with_charge(charge_type + '_equiv')
print("\nChecking differences between raw and equivalenced charges ...")
print(compare_charges(charge_type, charge_type + '_equiv', g.molecule))
print("\nThe molecule with {0} charges:".format(esp_charge_type.upper()))
print("RRMS: {0:.5f}".format(esp_charge_rrms))
for atom in g.molecule:
atom.print_with_charge(esp_charge_type)
print("\nThe molecule with equivalenced {0} charges (unrestrained RESP):"
.format(esp_charge_type.upper()))
print("RRMS: {0:.5f}".format(resp_charge_rrms))
for atom in esp_equiv_molecule:
atom.print_with_charge('resp')
print("\nChecking differences between raw and equivalenced charges ...")
print(compare_charges(esp_charge_type, 'resp', g.molecule, esp_equiv_molecule))
start_charges = [atom.charges[charge_type + '_equiv'] for atom in g.molecule]
num = 50
ratio_limits = (0, 1.5)
print("\nEvaluating HEAVY ratios. This may take a while.")
heavy_args = (g.field, path, resp_output_path, esp_fn, False)
heavy_result, indicator_charge, ratio_values = resp.eval_ratios(
'heavy', ratio_limits, start_charges, num, indicator_label, heavy_args,
first_verbose=True)
if zero_net_charge:
    # Scaling all charges is only possible with neutral molecules, as
    # otherwise there are no free hydrogens left to compensate (unlike in
    # the 'heavy_only' version)
print("\nEvaluating REGULAR ratios. This may take a while.")
regular_args = (g.molecule, g.field)
# Note that indicator charge and ratio values are re-used from the heavy
# version. This is fine for ratio_values. For indicator_charge it's fine as
# long as the indicator charge is on a heavy atom. TODO
result = resp.eval_ratios('regular', ratio_limits, start_charges, num,
indicator_label, regular_args,
first_verbose=True)[0]
# RATIO MINIMIZATION
print("\nMinimizing HEAVY ratio. This shouldn't take long.")
# Most arguments here are the same as in the loop with minor changes specific
# to an optimization run (output directory, verbosity)
heavy_args = (start_charges, g.field, path, min_resp_output_path, esp_fn, True)
heavy_min_ratio, heavy_min_ratio_rrms, heavy_charges = resp.minimize_ratio(
'heavy', ratio_values, heavy_result, heavy_args)
if zero_net_charge:
print("Minimizing REGULAR ratio. This shouldn't take long.")
regular_args = (start_charges, g.molecule, g.field)
reg_min_ratio, reg_min_ratio_rrms, reg_charges = resp.minimize_ratio(
'regular', ratio_values, result, regular_args)
shutil.rmtree(min_resp_output_path)
def plot(result_list, heavy, min_ratio, min_ratio_rrms):
fig, ax1 = plt.subplots()
ax1.plot(ratio_values, result_list)
ax2 = ax1.twiny()
ax2.plot(indicator_charge, result_list)
if start_charges[indicator_label-1] < 0:
ax2.invert_xaxis()
ax1.set_xlabel(charge_type.upper() + " charge ratio")
ax2.set_xlabel("Charge on " + g.molecule[indicator_label-1].atomic_number +
str(indicator_label))
ax1.set_ylabel("RRMS")
ax1.set_ylim(0, ax1.get_ylim()[1])
ax1.set_xlim(*ratio_limits)
ax2.set_xlim(indicator_charge[0], indicator_charge[-1])
# The lines should overlap so one of the lines can be removed:
ax2.lines[0].remove()
# NOTE: if the plots don't look right, try disabling the above option and
# see whether you get two different lines. However, hard-coding the x-axis
# limits should ensure that the lines do overlap.
mark_charge = g.molecule[indicator_label-1].charges[charge_type]
ax2.scatter(mark_charge, charge_rrms, marker='D')
ax2.annotate(charge_type.upper(), xy=(mark_charge, charge_rrms),
textcoords='offset points', xytext=(5, -10))
ax1.scatter(1, equiv_charge_rrms)
mark_equiv_charge = g.molecule[indicator_label-1].charges[charge_type +
'_equiv']
if (math.isclose(mark_charge, mark_equiv_charge, rel_tol=0.04) and
math.isclose(charge_rrms, equiv_charge_rrms, rel_tol=0.04)):
print("WARNING: The NBO and NBO (equiv) points overlap or are close to"
" overlapping. Only one label is plotted.")
else:
ax1.annotate(charge_type.upper() + ' (equiv)',
xy=(1, equiv_charge_rrms), textcoords='offset points',
xytext=(5, -10))
ax1.plot((1, 1), (0, ax1.get_ylim()[1]), 'g--')
ax1.plot(ratio_limits, (resp_charge_rrms, resp_charge_rrms), 'r--')
ax1.plot((min_ratio, min_ratio), (0, min_ratio_rrms), 'g--')
title = "{0}: RRMS on {1} fitting points v. {2} ratio".format(
molecule_name, esp_charge_type.upper(), charge_type.upper())
if heavy:
title += "\nset on heavy atoms ONLY (H free to improve fit)"
else:
title += "\nset on ALL atoms (only possible for neutral molecules)"
plt.title(title, y=1.15)
plt.savefig(output_path+"{0}.pdf".format('heavy' if heavy else 'regular'))
plt.close()
molecule_name = graphs.pretty_molecule_name(molecule_name)
if zero_net_charge:
plot(result, False, reg_min_ratio, reg_min_ratio_rrms)
plot(heavy_result, True, heavy_min_ratio, heavy_min_ratio_rrms)
| gpl-3.0 |
poeticcapybara/pythalesians | pythalesians/economics/events/histecondatafactory.py | 1 | 5008 | __author__ = 'saeedamen' # Saeed Amen / [email protected]
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
HistEconDataFactory
Provides functions for getting historical economic data. Uses aliases for tickers, to make it relatively easy to use,
rather than having to remember all the underlying vendor tickers. Can use Fred, Quandl or Bloomberg.
The files below, contain default tickers and country groups. However, you can add whichever tickers you'd like.
- conf/all_econ_tickers.csv
- conf/econ_country_codes.csv
- conf/econ_country_groups.csv
These can be automatically generated via conf/econ_tickers.xlsm
"""
from pythalesians.util.loggermanager import LoggerManager
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.market.loaders.lighttimeseriesfactory import LightTimeSeriesFactory
from pythalesians.util.constants import Constants
import pandas
import datetime
class HistEconDataFactory:
def __init__(self):
self.logger = LoggerManager().getLogger(__name__)
self._all_econ_tickers = pandas.read_csv(Constants().all_econ_tickers)
self._econ_country_codes = pandas.read_csv(Constants().econ_country_codes)
self._econ_country_groups = pandas.read_csv(Constants().econ_country_groups)
self.time_series_factory = LightTimeSeriesFactory()
# if Constants().default_time_series_factory == 'lighttimeseriesfactory':
# self.time_series_factory = LightTimeSeriesFactory()
# else:
# self.time_series_factory = CachedTimeSeriesFactory()
# return
def get_economic_data_history(self, start_date, finish_date, country_group, data_type,
source = 'fred', cache_algo = "internet_load_return"):
#vendor_country_codes = self.fred_country_codes[country_group]
#vendor_pretty_country = self.fred_nice_country_codes[country_group]
if isinstance(country_group, list):
pretty_country_names = country_group
else:
# get all the country names in the country_group
pretty_country_names = list(self._econ_country_groups[
self._econ_country_groups["Country Group"] == country_group]['Country'])
# construct the pretty tickers
pretty_tickers = [x + '-' + data_type for x in pretty_country_names]
# get vendor tickers
vendor_tickers = []
for pretty_ticker in pretty_tickers:
vendor_ticker = list(self._all_econ_tickers[
self._all_econ_tickers["Full Code"] == pretty_ticker][source].values)
if vendor_ticker == []:
vendor_ticker = None
self.logger.error('Could not find match for ' + pretty_ticker)
else:
vendor_ticker = vendor_ticker[0]
vendor_tickers.append(vendor_ticker)
vendor_fields = ['close']
if source == 'bloomberg': vendor_fields = ['PX_LAST']
time_series_request = TimeSeriesRequest(
start_date = start_date, # start date
finish_date = finish_date, # finish date
category = 'economic',
freq = 'daily', # intraday data
data_source = source, # use Bloomberg as data source
cut = 'LOC',
tickers = pretty_tickers,
fields = ['close'], # which fields to download
vendor_tickers = vendor_tickers,
vendor_fields = vendor_fields, # which Bloomberg fields to download
cache_algo = cache_algo) # how to return data
return self.time_series_factory.harvest_time_series(time_series_request)
def grasp_coded_entry(self, df, index):
df = df.ix[index:].stack()
df = df.reset_index()
df.columns = ['Date', 'Name', 'Val']
countries = df['Name']
countries = [x.split('-', 1)[0] for x in countries]
df['Code'] = sum(
[list(self._econ_country_codes[self._econ_country_codes["Country"] == x]['Code']) for x in countries],
[])
return df
if __name__ == '__main__':
pass
# see examples/histecondata_examples for ideas on how to call
| apache-2.0 |
thatchristoph/RTLSDR-Scanner | src/panels.py | 1 | 32280 | #
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2014 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import copy
from matplotlib import cm
import matplotlib
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigureCanvas
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import Normalize
from matplotlib.ticker import AutoMinorLocator, ScalarFormatter
import wx
from constants import Display
from controls import GridToolTips, CheckBoxCellRenderer
from misc import format_precision
from plot_3d import Plotter3d
from plot_controls import MouseZoom, MouseSelect
from plot_line import Plotter
from plot_spect import Spectrogram
from plot_status import PlotterStatus
from spectrum import split_spectrum_sort, Measure, reduce_points
from toolbars import NavigationToolbar, NavigationToolbarCompare
from utils_wx import close_modeless
import wx.grid as wxGrid
class PanelGraph(wx.Panel):
def __init__(self, panel, notify, settings, callbackMotion, remoteControl):
self.panel = panel
self.notify = notify
self.plot = None
self.settings = settings
self.remoteControl = remoteControl
self.spectrum = None
self.isLimited = None
self.limit = None
self.extent = None
self.annotate = None
self.mouseSelect = None
self.mouseZoom = None
self.measureTable = None
self.background = None
self.selectStart = None
self.selectEnd = None
self.menuClearSelect = []
self.measure = None
self.show = None
self.doDraw = False
wx.Panel.__init__(self, panel)
self.figure = matplotlib.figure.Figure(facecolor='white')
self.canvas = FigureCanvas(self, -1, self.figure)
self.measureTable = PanelMeasure(self, settings)
self.toolbar = NavigationToolbar(self.canvas, self, settings,
self.__hide_overlay)
self.toolbar.Realize()
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.canvas, 1, wx.EXPAND)
vbox.Add(self.measureTable, 0, wx.EXPAND)
vbox.Add(self.toolbar, 0, wx.EXPAND)
self.SetSizer(vbox)
vbox.Fit(self)
self.create_plot()
self.canvas.mpl_connect('button_press_event', self.__on_press)
self.canvas.mpl_connect('motion_notify_event', callbackMotion)
self.canvas.mpl_connect('draw_event', self.__on_draw)
self.canvas.mpl_connect('idle_event', self.__on_idle)
self.Bind(wx.EVT_SIZE, self.__on_size)
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.__on_timer, self.timer)
def __set_fonts(self):
axes = self.plot.get_axes()
if axes is not None:
axes.xaxis.label.set_size('small')
axes.yaxis.label.set_size('small')
if self.settings.display == Display.SURFACE:
axes.zaxis.label.set_size('small')
axes.tick_params(axis='both', which='major', labelsize='small')
axes = self.plot.get_axes_bar()
axes.tick_params(axis='both', which='major', labelsize='small')
def __enable_menu(self, state):
for menu in self.menuClearSelect:
menu.Enable(state)
def __on_press(self, event):
if self.settings.clickTune and event.dblclick:
frequency = int(event.xdata * 1e6)
self.remoteControl.tune(frequency)
def __on_size(self, event):
ppi = wx.ScreenDC().GetPPI()
size = [float(v) for v in self.canvas.GetSize()]
width = size[0] / ppi[0]
height = size[1] / ppi[1]
self.figure.set_figwidth(width)
self.figure.set_figheight(height)
self.figure.set_dpi(ppi[0])
event.Skip()
def __on_draw(self, _event):
axes = self.plot.get_axes()
if axes is not None:
self.background = self.canvas.copy_from_bbox(axes.bbox)
self.__draw_overlay()
def __on_idle(self, _event):
if self.doDraw and self.plot.get_plot_thread() is None:
self.__hide_overlay()
self.canvas.draw()
self.doDraw = False
def __on_timer(self, _event):
self.timer.Stop()
self.set_plot(None, None, None, None, self.annotate)
def __draw_overlay(self):
if self.background is not None:
self.canvas.restore_region(self.background)
self.__draw_select()
self.draw_measure()
axes = self.plot.get_axes()
if axes is None:
self.canvas.draw()
else:
self.canvas.blit(axes.bbox)
def __draw_select(self):
if self.selectStart is not None and self.selectEnd is not None:
self.mouseSelect.draw(self.selectStart, self.selectEnd)
def __hide_overlay(self):
if self.plot is not None:
self.plot.hide_measure()
self.__hide_select()
def __hide_select(self):
if self.mouseSelect is not None:
self.mouseSelect.hide()
def create_plot(self):
if self.plot is not None:
self.plot.close()
self.toolbar.set_auto(True)
if self.settings.display == Display.PLOT:
self.plot = Plotter(self.notify, self.figure, self.settings)
elif self.settings.display == Display.SPECT:
self.plot = Spectrogram(self.notify, self.figure, self.settings)
elif self.settings.display == Display.SURFACE:
self.plot = Plotter3d(self.notify, self.figure, self.settings)
else:
self.plot = PlotterStatus(self.notify, self.figure, self.settings)
self.__set_fonts()
self.toolbar.set_plot(self.plot)
self.toolbar.set_type(self.settings.display)
self.measureTable.set_type(self.settings.display)
self.set_plot_title()
self.figure.subplots_adjust(top=0.85)
self.redraw_plot()
self.plot.scale_plot(True)
self.mouseZoom = MouseZoom(self.toolbar, plot=self.plot,
callbackHide=self.__hide_overlay)
self.mouseSelect = MouseSelect(self.plot, self.on_select,
self.on_selected)
self.measureTable.show(self.settings.showMeasure)
self.panel.SetFocus()
def on_select(self):
self.hide_measure()
def on_selected(self, start, end):
self.__enable_menu(True)
self.selectStart = start
self.selectEnd = end
self.measureTable.set_selected(self.spectrum, start, end)
def add_menu_clear_select(self, menu):
self.menuClearSelect.append(menu)
menu.Enable(False)
def draw(self):
self.doDraw = True
def show_measure_table(self, show):
self.measureTable.show(show)
self.Layout()
def set_plot(self, spectrum, isLimited, limit, extent, annotate=False):
if spectrum is not None and extent is not None:
if isLimited is not None and limit is not None:
self.spectrum = copy.copy(spectrum)
self.extent = extent
self.annotate = annotate
self.isLimited = isLimited
self.limit = limit
if self.plot.get_plot_thread() is None:
self.timer.Stop()
self.measureTable.set_selected(self.spectrum, self.selectStart,
self.selectEnd)
if isLimited:
spectrum = reduce_points(spectrum, limit)
self.plot.set_plot(self.spectrum, self.extent, annotate)
else:
self.timer.Start(200, oneShot=True)
def set_plot_title(self):
if len(self.settings.devicesRtl) > 0:
gain = self.settings.devicesRtl[self.settings.indexRtl].gain
else:
gain = 0
self.plot.set_title("Frequency Spectrogram\n{0} - {1} MHz,"
" gain = {2}dB".format(self.settings.start,
self.settings.stop, gain))
def redraw_plot(self):
if self.spectrum is not None:
self.set_plot(self.spectrum,
self.settings.pointsLimit,
self.settings.pointsMax,
self.extent, self.settings.annotate)
def set_grid(self, on):
self.plot.set_grid(on)
def hide_measure(self):
if self.plot is not None:
self.plot.hide_measure()
def draw_measure(self):
if self.measure is not None and self.measure.is_valid():
self.plot.draw_measure(self.measure, self.show)
def update_measure(self, measure=None, show=None):
if not measure and not show:
self.measureTable.update_measure()
else:
self.measure = measure
self.show = show
self.__draw_overlay()
def get_figure(self):
return self.figure
def get_axes(self):
return self.plot.get_axes()
def get_canvas(self):
return self.canvas
def get_toolbar(self):
return self.toolbar
def scale_plot(self, force=False):
self.plot.scale_plot(force)
def clear_plots(self):
self.plot.clear_plots()
self.spectrum = None
self.doDraw = True
def clear_selection(self):
self.measure = None
self.measureTable.clear_measurement()
self.selectStart = None
self.selectEnd = None
self.mouseSelect.clear()
self.__enable_menu(False)
def close(self):
close_modeless()
class PanelGraphCompare(wx.Panel):
def __init__(self, parent, callback):
self.callback = callback
self.spectrum1 = None
self.spectrum2 = None
self.spectrumDiff = None
self.mouseZoom = None
formatter = ScalarFormatter(useOffset=False)
wx.Panel.__init__(self, parent)
figure = matplotlib.figure.Figure(facecolor='white')
figure.set_size_inches(8, 4.5)
figure.set_tight_layout(True)
self.axesScan = figure.add_subplot(111)
self.axesScan.xaxis.set_minor_locator(AutoMinorLocator(10))
self.axesScan.yaxis.set_minor_locator(AutoMinorLocator(10))
self.axesScan.xaxis.set_major_formatter(formatter)
self.axesScan.yaxis.set_major_formatter(formatter)
self.axesDiff = self.axesScan.twinx()
self.axesDiff.yaxis.set_minor_locator(AutoMinorLocator(10))
self.plotScan1, = self.axesScan.plot([], [], 'b-',
linewidth=0.4)
self.plotScan2, = self.axesScan.plot([], [], 'g-',
linewidth=0.4)
self.plotDiff, = self.axesDiff.plot([], [], 'r-', linewidth=0.4)
self.axesScan.set_ylim(auto=True)
self.axesDiff.set_ylim(auto=True)
self.axesScan.set_title("Level Comparison")
self.axesScan.set_xlabel("Frequency (MHz)")
self.axesScan.set_ylabel('Level ($\mathsf{dB/\sqrt{Hz}}$)')
self.axesDiff.set_ylabel('Difference ($\mathsf{dB/\sqrt{Hz}}$)')
self.canvas = FigureCanvas(self, -1, figure)
self.set_grid(True)
self.textIntersect = wx.StaticText(self, label="Intersections: ")
toolbar = NavigationToolbarCompare(self)
toolbar.Realize()
self.mouseZoom = MouseZoom(toolbar, figure=figure)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
vbox.Add(self.textIntersect, 0, wx.EXPAND | wx.ALL, border=5)
vbox.Add(toolbar, 0, wx.EXPAND)
self.SetSizer(vbox)
vbox.Fit(self)
self.canvas.mpl_connect('motion_notify_event', self.__on_motion)
def __on_motion(self, event):
xpos = event.xdata
ypos = event.ydata
if xpos is None or ypos is None:
return
locs = dict.fromkeys(['x1', 'y1', 'x2', 'y2', 'x3', 'y3'], None)
if self.spectrum1 is not None and len(self.spectrum1) > 0:
locs['x1'] = min(self.spectrum1.keys(),
key=lambda freq: abs(freq - xpos))
locs['y1'] = self.spectrum1[locs['x1']]
if self.spectrum2 is not None and len(self.spectrum2) > 0:
locs['x2'] = min(self.spectrum2.keys(),
key=lambda freq: abs(freq - xpos))
locs['y2'] = self.spectrum2[locs['x2']]
if self.spectrumDiff is not None and len(self.spectrumDiff) > 0:
locs['x3'] = min(self.spectrumDiff.keys(),
key=lambda freq: abs(freq - xpos))
locs['y3'] = self.spectrumDiff[locs['x3']]
self.callback(locs)
def __relim(self):
self.axesScan.relim()
self.axesDiff.relim()
def __plot_diff(self):
diff = {}
intersections = 0
if self.spectrum1 is not None and self.spectrum2 is not None:
set1 = set(self.spectrum1)
set2 = set(self.spectrum2)
intersect = set1.intersection(set2)
intersections = len(intersect)
for freq in intersect:
diff[freq] = self.spectrum1[freq] - self.spectrum2[freq]
freqs, powers = split_spectrum_sort(diff)
self.plotDiff.set_xdata(freqs)
self.plotDiff.set_ydata(powers)
elif self.spectrum1 is None:
freqs, powers = split_spectrum_sort(self.spectrum2)
intersections = len(freqs)
self.plotDiff.set_xdata(freqs)
self.plotDiff.set_ydata([0] * intersections)
else:
freqs, powers = split_spectrum_sort(self.spectrum1)
intersections = len(freqs)
self.plotDiff.set_xdata(freqs)
self.plotDiff.set_ydata([0] * intersections)
self.spectrumDiff = diff
self.textIntersect.SetLabel('Intersections: {0}'.format(intersections))
def get_canvas(self):
return self.canvas
def show_plot1(self, enable):
self.plotScan1.set_visible(enable)
self.canvas.draw()
def show_plot2(self, enable):
self.plotScan2.set_visible(enable)
self.canvas.draw()
def show_plotdiff(self, enable):
self.plotDiff.set_visible(enable)
self.canvas.draw()
def set_spectrum1(self, spectrum):
timeStamp = max(spectrum)
self.spectrum1 = spectrum[timeStamp]
freqs, powers = split_spectrum_sort(self.spectrum1)
self.plotScan1.set_xdata(freqs)
self.plotScan1.set_ydata(powers)
self.__plot_diff()
self.__relim()
self.autoscale()
def set_spectrum2(self, spectrum):
timeStamp = max(spectrum)
self.spectrum2 = spectrum[timeStamp]
freqs, powers = split_spectrum_sort(self.spectrum2)
self.plotScan2.set_xdata(freqs)
self.plotScan2.set_ydata(powers)
self.__plot_diff()
self.__relim()
self.autoscale()
def set_grid(self, grid):
self.axesScan.grid(grid)
self.canvas.draw()
def autoscale(self):
self.axesScan.autoscale_view()
self.axesDiff.autoscale_view()
self.canvas.draw()
class PanelColourBar(wx.Panel):
def __init__(self, parent, colourMap):
wx.Panel.__init__(self, parent)
dpi = wx.ScreenDC().GetPPI()[0]
figure = matplotlib.figure.Figure(facecolor='white', dpi=dpi)
figure.set_size_inches(200.0 / dpi, 25.0 / dpi)
self.canvas = FigureCanvas(self, -1, figure)
axes = figure.add_subplot(111)
figure.subplots_adjust(0, 0, 1, 1)
norm = Normalize(vmin=0, vmax=1)
self.bar = ColorbarBase(axes, norm=norm, orientation='horizontal',
cmap=cm.get_cmap(colourMap))
axes.xaxis.set_visible(False)
def set_map(self, colourMap):
self.bar.set_cmap(colourMap)
self.bar.changed()
self.bar.draw_all()
self.canvas.draw()
class PanelLine(wx.Panel):
def __init__(self, parent, colour):
self.colour = colour
wx.Panel.__init__(self, parent)
self.Bind(wx.EVT_PAINT, self.__on_paint)
def __on_paint(self, _event):
dc = wx.BufferedPaintDC(self)
width, height = self.GetClientSize()
if not width or not height:
return
pen = wx.Pen(self.colour, 2)
dc.SetPen(pen)
colourBack = self.GetBackgroundColour()
brush = wx.Brush(colourBack, wx.SOLID)
dc.SetBackground(brush)
dc.Clear()
dc.DrawLine(0, height / 2., width, height / 2.)
class PanelMeasure(wx.Panel):
def __init__(self, graph, settings):
wx.Panel.__init__(self, graph)
self.graph = graph
self.settings = settings
self.measure = None
self.checked = {Measure.MIN: None,
Measure.MAX: None,
Measure.AVG: None,
Measure.GMEAN: None,
Measure.HBW: None,
Measure.OBW: None}
self.selected = None
self.SetBackgroundColour('white')
self.grid = wxGrid.Grid(self)
self.grid.CreateGrid(3, 19)
self.grid.EnableEditing(False)
self.grid.EnableDragGridSize(False)
self.grid.SetColLabelSize(1)
self.grid.SetRowLabelSize(1)
self.grid.SetColMinimalAcceptableWidth(1)
self.grid.SetColSize(2, 1)
self.grid.SetColSize(7, 1)
self.grid.SetColSize(11, 1)
self.grid.SetColSize(15, 1)
self.grid.SetMargins(0, wx.SystemSettings_GetMetric(wx.SYS_HSCROLL_Y))
for x in xrange(self.grid.GetNumberRows()):
self.grid.SetRowLabelValue(x, '')
for y in xrange(self.grid.GetNumberCols()):
self.grid.SetColLabelValue(y, '')
self.locsDesc = {'F Start': (0, 0),
'F End': (1, 0),
'F Delta': (2, 0),
'P Min': (0, 4),
'P Max': (1, 4),
'P Delta': (2, 4),
'Mean': (0, 9),
'GMean': (1, 9),
'Flatness': (2, 9),
'-3dB Start': (0, 13),
'-3dB End': (1, 13),
'-3dB Delta': (2, 13),
'OBW Start': (0, 17),
'OBW End': (1, 17),
'OBW Delta': (2, 17)}
self.__set_descs()
self.locsCheck = {Measure.MIN: (0, 3), Measure.MAX: (1, 3),
Measure.AVG: (0, 8), Measure.GMEAN: (1, 8),
Measure.HBW: (0, 12),
Measure.OBW: (0, 16)}
self.__set_check_editor()
colour = self.grid.GetBackgroundColour()
self.grid.SetCellTextColour(2, 3, colour)
self.grid.SetCellTextColour(2, 8, colour)
self.grid.SetCellTextColour(1, 12, colour)
self.grid.SetCellTextColour(2, 12, colour)
self.grid.SetCellTextColour(1, 16, colour)
self.grid.SetCellTextColour(2, 16, colour)
self.__clear_checks()
self.locsMeasure = {'start': (0, 1), 'end': (1, 1), 'deltaF': (2, 1),
'minFP': (0, 5), 'maxFP': (1, 5), 'deltaFP': (2, 5),
'minP': (0, 6), 'maxP': (1, 6), 'deltaP': (2, 6),
'avg': (0, 10), 'gmean': (1, 10), 'flat': (2, 10),
'hbwstart': (0, 14), 'hbwend': (1, 14), 'hbwdelta': (2, 14),
'obwstart': (0, 18), 'obwend': (1, 18), 'obwdelta': (2, 18)}
fontCell = self.grid.GetDefaultCellFont()
fontSize = fontCell.GetPointSize()
fontStyle = fontCell.GetStyle()
fontWeight = fontCell.GetWeight()
font = wx.Font(fontSize, wx.FONTFAMILY_MODERN, fontStyle,
fontWeight)
dc = wx.WindowDC(self.grid)
dc.SetFont(font)
widthMHz = dc.GetTextExtent('###.######')[0] * 1.2
widthdB = dc.GetTextExtent('-##.##')[0] * 1.2
for _desc, (_row, col) in self.locsDesc.iteritems():
self.grid.AutoSizeColumn(col)
for col in [1, 5, 14, 18]:
self.grid.SetColSize(col, widthMHz)
for row in xrange(self.grid.GetNumberRows()):
self.grid.SetCellFont(row, col, font)
for col in [6, 10]:
self.grid.SetColSize(col, widthdB)
for row in xrange(self.grid.GetNumberRows()):
self.grid.SetCellFont(row, col, font)
for _desc, (_row, col) in self.locsCheck.iteritems():
self.grid.AutoSizeColumn(col)
toolTips = {}
toolTips[self.locsMeasure['start']] = 'Selection start (MHz)'
toolTips[self.locsMeasure['end']] = 'Selection end (MHz)'
toolTips[self.locsMeasure['deltaF']] = 'Selection bandwidth (MHz)'
toolTips[self.locsMeasure['minFP']] = 'Minimum power location (MHz)'
toolTips[self.locsMeasure['maxFP']] = 'Maximum power location (MHz)'
toolTips[self.locsMeasure['deltaFP']] = 'Power location difference (MHz)'
toolTips[self.locsMeasure['minP']] = 'Minimum power (dB)'
toolTips[self.locsMeasure['maxP']] = 'Maximum power (dB)'
toolTips[self.locsMeasure['deltaP']] = 'Power difference (dB)'
toolTips[self.locsMeasure['avg']] = 'Mean power (dB)'
toolTips[self.locsMeasure['gmean']] = 'Geometric mean power (dB)'
toolTips[self.locsMeasure['flat']] = 'Spectral flatness'
toolTips[self.locsMeasure['hbwstart']] = '-3db start location (MHz)'
toolTips[self.locsMeasure['hbwend']] = '-3db end location (MHz)'
toolTips[self.locsMeasure['hbwdelta']] = '-3db bandwidth (MHz)'
toolTips[self.locsMeasure['obwstart']] = '99% start location (MHz)'
toolTips[self.locsMeasure['obwend']] = '99% end location (MHz)'
toolTips[self.locsMeasure['obwdelta']] = '99% bandwidth (MHz)'
self.toolTips = GridToolTips(self.grid, toolTips)
self.popupMenu = wx.Menu()
self.popupMenuCopy = self.popupMenu.Append(wx.ID_ANY, "&Copy",
"Copy entry")
self.Bind(wx.EVT_MENU, self.__on_copy, self.popupMenuCopy)
self.Bind(wxGrid.EVT_GRID_CELL_RIGHT_CLICK, self.__on_popup_menu)
self.Bind(wxGrid.EVT_GRID_CELL_LEFT_CLICK, self.__on_cell_click)
box = wx.BoxSizer(wx.VERTICAL)
box.Add(self.grid, 0, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
border=10)
self.SetSizer(box)
def __set_descs(self):
font = self.grid.GetCellFont(0, 0)
font.SetWeight(wx.BOLD)
for desc, (row, col) in self.locsDesc.iteritems():
self.grid.SetCellValue(row, col, desc)
self.grid.SetCellFont(row, col, font)
def __set_check_editor(self):
for _desc, (row, col) in self.locsCheck.iteritems():
self.grid.SetCellEditor(row, col, wxGrid.GridCellBoolEditor())
self.grid.SetCellAlignment(row, col, wx.ALIGN_RIGHT, wx.ALIGN_CENTRE)
self.grid.SetCellRenderer(row, col, CheckBoxCellRenderer(self))
def __set_check_value(self, cell, value):
(row, col) = self.locsCheck[cell]
self.grid.SetCellValue(row, col, value)
def __set_measure_value(self, cell, value):
(row, col) = self.locsMeasure[cell]
try:
self.grid.SetCellValue(row, col, value)
except(TypeError):
pass
def __set_check_read_only(self, cell, readOnly):
(row, col) = self.locsCheck[cell]
renderer = self.grid.GetCellRenderer(row, col)
renderer.Enable(not readOnly)
def __get_checks(self):
checks = {}
for cell in self.checked:
if self.checked[cell] == '1':
checks[cell] = True
else:
checks[cell] = False
return checks
def __update_checks(self):
for cell in self.checked:
self.__set_check_value(cell, self.checked[cell])
def __clear_checks(self):
for cell in self.checked:
self.checked[cell] = '0'
self.__update_checks()
def __on_cell_click(self, event):
self.grid.ClearSelection()
row = event.GetRow()
col = event.GetCol()
if (row, col) in self.locsCheck.values():
if not self.grid.IsReadOnly(row, col) and self.measure is not None:
check = self.grid.GetCellValue(row, col)
if check == '1':
check = '0'
else:
check = '1'
self.grid.SetCellValue(row, col, check)
for control, (r, c) in self.locsCheck.iteritems():
if (r, c) == (row, col):
self.checked[control] = check
if self.selected is None:
self.selected = self.locsMeasure['start']
row = self.selected[0]
col = self.selected[1]
self.grid.SetGridCursor(row, col)
self.update_measure()
elif (row, col) in self.locsMeasure.itervalues():
self.selected = (row, col)
self.grid.SetGridCursor(row, col)
elif self.selected is None:
self.selected = self.locsMeasure['start']
row = self.selected[0]
col = self.selected[1]
self.grid.SetGridCursor(row, col)
def __on_popup_menu(self, _event):
if self.selected:
self.popupMenuCopy.Enable(True)
else:
self.popupMenuCopy.Enable(False)
self.PopupMenu(self.popupMenu)
def __on_copy(self, _event):
value = self.grid.GetCellValue(self.selected[0], self.selected[1])
clip = wx.TextDataObject(value)
wx.TheClipboard.Open()
wx.TheClipboard.SetData(clip)
wx.TheClipboard.Close()
def update_measure(self):
show = self.__get_checks()
self.graph.update_measure(self.measure, show)
def clear_measurement(self):
for control in self.locsMeasure:
self.__set_measure_value(control, "")
self.__clear_checks()
self.update_measure()
self.measure = None
def set_selected(self, spectrum, start, end):
self.measure = Measure(spectrum, start, end)
if not self.measure.is_valid():
self.clear_measurement()
return
minF, maxF = self.measure.get_f()
minP = self.measure.get_min_p()
maxP = self.measure.get_max_p()
avgP = self.measure.get_avg_p()
gMeanP = self.measure.get_gmean_p()
flatness = self.measure.get_flatness()
hbw = self.measure.get_hpw()
obw = self.measure.get_obw()
self.__set_measure_value('start',
format_precision(self.settings,
minF,
units=False))
self.__set_measure_value('end',
format_precision(self.settings,
maxF,
units=False))
self.__set_measure_value('deltaF',
format_precision(self.settings,
maxF - minF,
units=False))
self.__set_measure_value('minFP',
format_precision(self.settings,
minP[0],
units=False))
self.__set_measure_value('maxFP',
format_precision(self.settings,
maxP[0],
units=False))
self.__set_measure_value('deltaFP',
format_precision(self.settings,
maxP[0] - minP[0],
units=False))
self.__set_measure_value('minP',
format_precision(self.settings,
level=minP[1],
units=False))
self.__set_measure_value('maxP',
format_precision(self.settings,
level=maxP[1],
units=False))
self.__set_measure_value('deltaP',
format_precision(self.settings,
level=maxP[1] - minP[1],
units=False))
self.__set_measure_value('avg',
format_precision(self.settings,
level=avgP,
units=False))
self.__set_measure_value('gmean',
format_precision(self.settings,
level=gMeanP,
units=False))
self.__set_measure_value('flat',
"{0:.4f}".format(flatness))
if hbw[0] is not None:
text = format_precision(self.settings, hbw[0], units=False)
else:
text = ''
self.__set_measure_value('hbwstart', text)
if hbw[1] is not None:
text = format_precision(self.settings, hbw[1], units=False)
else:
text = ''
self.__set_measure_value('hbwend', text)
if hbw[0] is not None and hbw[1] is not None:
text = format_precision(self.settings, hbw[1] - hbw[0], units=False)
else:
text = ''
self.__set_measure_value('hbwdelta', text)
if obw[0] is not None:
text = format_precision(self.settings, obw[0], units=False)
else:
text = ''
self.__set_measure_value('obwstart', text)
if obw[1] is not None:
            text = format_precision(self.settings, obw[1], units=False)
else:
text = ''
self.__set_measure_value('obwend', text)
if obw[0] is not None and obw[1] is not None:
            text = format_precision(self.settings, obw[1] - obw[0],
                                    units=False)
else:
text = ''
self.__set_measure_value('obwdelta', text)
self.update_measure()
def show(self, show):
if show:
self.Show()
else:
self.Hide()
self.Layout()
def set_type(self, display):
for cell in self.locsCheck:
self.__set_check_read_only(cell, True)
if display == Display.PLOT:
for cell in self.locsCheck:
self.__set_check_read_only(cell, False)
elif display == Display.SPECT:
self.__set_check_read_only(Measure.HBW, False)
self.__set_check_read_only(Measure.OBW, False)
self.grid.Refresh()
if __name__ == '__main__':
print 'Please run rtlsdr_scan.py'
exit(1)
| gpl-3.0 |
astocko/statsmodels | statsmodels/examples/l1_demo/short_demo.py | 33 | 3737 | """
You can fit your LikelihoodModel using l1 regularization by changing
the method argument and adding an argument alpha. See code for
details.
The Story
---------
The maximum likelihood (ML) solution works well when the number of data
points is large and the noise is small. When the ML solution starts
"breaking", the regularized solution should do better.
The l1 Solvers
--------------
The standard l1 solver is fmin_slsqp and is included with scipy. It
sometimes has trouble verifying convergence when the data size is
large.
The l1_cvxopt_cp solver is part of CVXOPT and this package needs to be
installed separately. It works well even for larger data sizes.
"""
from __future__ import print_function
from statsmodels.compat.python import range
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
import pdb # pdb.set_trace()
## Load the data from Spector and Mazzeo (1980)
spector_data = sm.datasets.spector.load()
spector_data.exog = sm.add_constant(spector_data.exog)
N = len(spector_data.endog)
K = spector_data.exog.shape[1]
### Logit Model
logit_mod = sm.Logit(spector_data.endog, spector_data.exog)
## Standard logistic regression
logit_res = logit_mod.fit()
## Regularized regression
# Set the regularization parameter to something reasonable
alpha = 0.05 * N * np.ones(K)
# Use l1, which solves via a built-in (scipy.optimize) solver
logit_l1_res = logit_mod.fit_regularized(method='l1', alpha=alpha, acc=1e-6)
# Use l1_cvxopt_cp, which solves with a CVXOPT solver
logit_l1_cvxopt_res = logit_mod.fit_regularized(
method='l1_cvxopt_cp', alpha=alpha)
## Print results
print("============ Results for Logit =================")
print("ML results")
print(logit_res.summary())
print("l1 results")
print(logit_l1_res.summary())
print(logit_l1_cvxopt_res.summary())
### Multinomial Logit Example using American National Election Studies Data
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
mlogit_res = mlogit_mod.fit()
## Set the regularization parameter.
alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
# Don't regularize the constant
alpha[-1,:] = 0
mlogit_l1_res = mlogit_mod.fit_regularized(method='l1', alpha=alpha)
print(mlogit_l1_res.params)
#mlogit_l1_res = mlogit_mod.fit_regularized(
# method='l1_cvxopt_cp', alpha=alpha, abstol=1e-10, trim_tol=1e-6)
#print mlogit_l1_res.params
## Print results
print("============ Results for MNLogit =================")
print("ML results")
print(mlogit_res.summary())
print("l1 results")
print(mlogit_l1_res.summary())
#
#
#### Logit example with many params, sweeping alpha
spector_data = sm.datasets.spector.load()
X = spector_data.exog
Y = spector_data.endog
## Fit
N = 50 # number of points to solve at
K = X.shape[1]
logit_mod = sm.Logit(Y, X)
coeff = np.zeros((N, K)) # Holds the coefficients
alphas = 1 / np.logspace(-0.5, 2, N)
## Sweep alpha and store the coefficients
# QC check doesn't always pass with the default options.
# Use the options QC_verbose=True and disp=True
# to see what is happening. It just barely doesn't pass, so I decreased
# acc and increased QC_tol to make it pass
for n, alpha in enumerate(alphas):
logit_res = logit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='off', QC_tol=0.1, disp=False,
QC_verbose=True, acc=1e-15)
coeff[n,:] = logit_res.params
## Plot
plt.figure(1);plt.clf();plt.grid()
plt.title('Regularization Path');
plt.xlabel('alpha');
plt.ylabel('Parameter value');
for i in range(K):
plt.plot(alphas, coeff[:,i], label='X'+str(i), lw=3)
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.13/_downloads/rt_feedback_server.py | 9 | 4929 | """
==============================================
Real-time feedback for decoding :: Server Side
==============================================
This example demonstrates how to setup a real-time feedback
mechanism using StimServer and StimClient.
The idea here is to display future stimuli for the class which
is predicted less accurately. This allows on-demand adaptation
of the stimuli depending on the needs of the classifier.
To run this example, open ipython in two separate terminals.
In the first, run rt_feedback_server.py and then wait for the
message
RtServer: Start
Once that appears, run rt_feedback_client.py in the other terminal
and the feedback script should start.
All brain responses are simulated from a fiff file to make it easy
to test. However, it should be possible to adapt this script
for a real experiment.
"""
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
import mne
from mne.datasets import sample
from mne.realtime import StimServer
from mne.realtime import MockRtClient
from mne.decoding import Vectorizer, FilterEstimator
print(__doc__)
# Load fiff file to simulate data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# Instantiating stimulation server
# The with statement is necessary to ensure a clean exit
with StimServer(port=4218) as stim_server:
# The channels to be used while decoding
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
rt_client = MockRtClient(raw)
# Constructing the pipeline for classification
filt = FilterEstimator(raw.info, 1, 40)
scaler = preprocessing.StandardScaler()
vectorizer = Vectorizer()
clf = SVC(C=1, kernel='linear')
concat_classifier = Pipeline([('filter', filt), ('vector', vectorizer),
('scaler', scaler), ('svm', clf)])
stim_server.start(verbose=True)
# Just some initially decided events to be simulated
    # The rest will be decided on the fly
ev_list = [4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4]
score_c1, score_c2, score_x = [], [], []
for ii in range(50):
# Tell the stim_client about the next stimuli
stim_server.add_trigger(ev_list[ii])
# Collecting data
if ii == 0:
X = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')[None, ...]
y = ev_list[ii]
else:
X_temp = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')
X_temp = X_temp[np.newaxis, ...]
X = np.concatenate((X, X_temp), axis=0)
time.sleep(1) # simulating the isi
y = np.append(y, ev_list[ii])
# Start decoding after collecting sufficient data
if ii >= 10:
# Now start doing rtfeedback
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2,
random_state=7)
y_pred = concat_classifier.fit(X_train, y_train).predict(X_test)
cm = confusion_matrix(y_test, y_pred)
score_c1.append(float(cm[0, 0]) / sum(cm, 1)[0] * 100)
score_c2.append(float(cm[1, 1]) / sum(cm, 1)[1] * 100)
# do something if one class is decoded better than the other
if score_c1[-1] < score_c2[-1]:
print("We decoded class RV better than class LV")
ev_list.append(3) # adding more LV to future simulated data
else:
print("We decoded class LV better than class RV")
ev_list.append(4) # adding more RV to future simulated data
# Clear the figure
plt.clf()
# The x-axis for the plot
score_x.append(ii)
# Now plot the accuracy
plt.plot(score_x[-5:], score_c1[-5:])
plt.hold(True)
plt.plot(score_x[-5:], score_c2[-5:])
plt.xlabel('Trials')
plt.ylabel('Classification score (% correct)')
plt.title('Real-time feedback')
plt.ylim([0, 100])
plt.xticks(score_x[-5:])
plt.legend(('LV', 'RV'), loc='upper left')
plt.show()
| bsd-3-clause |
WarrenWeckesser/scikits-image | doc/examples/plot_multiblock_local_binary_pattern.py | 22 | 2498 | """
===========================================================
Multi-Block Local Binary Pattern for texture classification
===========================================================
This example shows how to compute multi-block local binary pattern (MB-LBP)
features as well as how to visualize them.
The features are calculated similarly to local binary patterns (LBPs), except
that summed blocks are used instead of individual pixel values.
MB-LBP is an extension of LBP that can be computed on multiple scales in
constant time using the integral image. 9 equally-sized rectangles are used to
compute a feature. For each rectangle, the sum of the pixel intensities is
computed. Comparisons of these sums to that of the central rectangle determine
the feature, similarly to LBP (See `LBP <plot_local_binary_pattern.html>`_).
First, we generate an image to illustrate the functioning of MB-LBP: consider
a (9, 9) rectangle and divide it into (3, 3) block, upon which we then apply
MB-LBP.
"""
from __future__ import print_function
from skimage.feature import multiblock_lbp
import numpy as np
from numpy.testing import assert_equal
from skimage.transform import integral_image
# Create test matrix where first and fifth rectangles starting
# from top left clockwise have greater value than the central one.
test_img = np.zeros((9, 9), dtype='uint8')
test_img[3:6, 3:6] = 1
test_img[:3, :3] = 50
test_img[6:, 6:] = 50
# First and fifth bits should be filled. This correct value will
# be compared to the computed one.
correct_answer = 0b10001000
int_img = integral_image(test_img)
lbp_code = multiblock_lbp(int_img, 0, 0, 3, 3)
assert_equal(correct_answer, lbp_code)
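# Hedged aside (not part of the original example): the constant-time claim above
# rests on the summed-area-table property of the integral image -- any block sum
# reduces to a few lookups.  For the (3, 3) block anchored at the origin a single
# lookup is enough:
assert_equal(int_img[2, 2], test_img[:3, :3].sum())  # 9 pixels of value 50 -> 450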
"""
Now let's apply the operator to a real image and see how the
visualization works.
"""
from skimage import data
from matplotlib import pyplot as plt
from skimage.feature import draw_multiblock_lbp
test_img = data.coins()
int_img = integral_image(test_img)
lbp_code = multiblock_lbp(int_img, 0, 0, 90, 90)
img = draw_multiblock_lbp(test_img, 0, 0, 90, 90,
lbp_code=lbp_code, alpha=0.5)
plt.imshow(img, interpolation='nearest')
plt.show()
"""
.. image:: PLOT2RST.current_figure
On the above plot we see the result of computing a MB-LBP and visualization of
the computed feature. The rectangles that have less intensities' sum than the
central rectangle are marked in cyan. The ones that have higher intensity
values are marked in white. The central rectangle is left untouched.
"""
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 9 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
PythonCharmers/orange3 | Orange/tests/test_tree.py | 4 | 3458 | import unittest
from collections import Counter
import numpy as np
import sklearn.tree as skl_tree
from sklearn.tree._tree import TREE_LEAF
from Orange.data import Table
from Orange.classification import TreeLearner
class TreeTest(unittest.TestCase):
def test_classification(self):
table = Table('iris')
learn = TreeLearner()
clf = learn(table)
Z = clf(table)
self.assertTrue(np.all(table.Y.flatten() == Z))
class SklearnTreeTest(unittest.TestCase):
def test_full_tree(self):
table = Table('iris')
clf = skl_tree.DecisionTreeClassifier()
clf = clf.fit(table.X, table.Y)
Z = clf.predict(table.X)
self.assertTrue(np.all(table.Y.flatten() == Z))
def test_min_samples_split(self):
table = Table('iris')
lim = 5
clf = skl_tree.DecisionTreeClassifier(min_samples_split=lim)
clf = clf.fit(table.X, table.Y)
t = clf.tree_
for i in range(t.node_count):
if t.children_left[i] != TREE_LEAF:
self.assertTrue(t.n_node_samples[i] >= lim)
def test_min_samples_leaf(self):
table = Table('iris')
lim = 5
clf = skl_tree.DecisionTreeClassifier(min_samples_leaf=lim)
clf = clf.fit(table.X, table.Y)
t = clf.tree_
for i in range(t.node_count):
if t.children_left[i] == TREE_LEAF:
self.assertTrue(t.n_node_samples[i] >= lim)
def test_max_leaf_nodes(self):
table = Table('iris')
lim = 5
clf = skl_tree.DecisionTreeClassifier(max_leaf_nodes=lim)
clf = clf.fit(table.X, table.Y)
t = clf.tree_
self.assertTrue(t.node_count <= lim * 2 - 1)
def test_criterion(self):
table = Table('iris')
clf = skl_tree.DecisionTreeClassifier(criterion="entropy")
clf = clf.fit(table.X, table.Y)
def test_splitter(self):
table = Table('iris')
clf = skl_tree.DecisionTreeClassifier(splitter="random")
clf = clf.fit(table.X, table.Y)
def test_weights(self):
table = Table('iris')
clf = skl_tree.DecisionTreeClassifier(max_depth=2)
clf = clf.fit(table.X, table.Y)
clfw = skl_tree.DecisionTreeClassifier(max_depth=2)
clfw = clfw.fit(table.X, table.Y, sample_weight=np.arange(len(table)))
self.assertFalse(len(clf.tree_.feature) == len(clfw.tree_.feature) and
np.all(clf.tree_.feature == clfw.tree_.feature))
def test_impurity(self):
table = Table('iris')
clf = skl_tree.DecisionTreeClassifier()
clf = clf.fit(table.X, table.Y)
t = clf.tree_
for i in range(t.node_count):
if t.children_left[i] == TREE_LEAF:
self.assertTrue(t.impurity[i] == 0)
else:
l, r = t.children_left[i], t.children_right[i]
child_impurity = min(t.impurity[l], t.impurity[r])
self.assertTrue(child_impurity <= t.impurity[i])
def test_navigate_tree(self):
table = Table('iris')
clf = skl_tree.DecisionTreeClassifier(max_depth=1)
clf = clf.fit(table.X, table.Y)
t = clf.tree_
x = table.X[0]
if x[t.feature[0]] <= t.threshold[0]:
v = t.value[t.children_left[0]][0]
else:
v = t.value[t.children_right[0]][0]
self.assertTrue(np.argmax(v) == clf.predict(table.X[0]))
| gpl-3.0 |
aarchiba/scipy | scipy/special/_precompute/struve_convergence.py | 17 | 3498 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
def do_plot(is_h=True):
from scipy.special._ufuncs import (_struve_power_series,
_struve_asymp_large_z,
_struve_bessel_series)
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
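    # The -12 level (log10 of the relative error) is what the module docstring
    # calls "close (1e-12) to the mpmath result"; -1000 effectively acts as -inf.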
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
yanlend/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
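# Note (added for clarity): each digits image is an (8, 8) array of grey levels,
# so flattening below yields 64-element feature vectors, i.e. a (n_samples, 64)
# data matrix.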
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
rzwang/SecurityStreaming | hack-2015/pipeline.py | 1 | 4310 | from clarifai.client import ClarifaiApi
from matplotlib import pyplot
from PIL import Image
from pymongo import MongoClient
import matplotlib.pyplot as plt
import numpy
import gridfs
import subprocess
import time
import sys
import logging
import os
FFMPEG_BIN = "/usr/local/Cellar/ffmpeg/2.5.4/bin/ffmpeg"
# FFMPEG_BIN = "C:\\ffmpeg\\bin\\ffmpeg.exe"
DATA_PATH = "data"
PICTURE_PATH = "public"
VIDEO_FILENAME = os.path.join(DATA_PATH, "graphic_vid.avi")
IMAGE_FILEPATH = os.path.join(PICTURE_PATH, "temp.png")
FRAME_WIDTH = 640
FRAME_HEIGHT = 360
EVERY_NTH_FRAME = 25
COMMAND = [ FFMPEG_BIN,
# '-ss', '00:00;00', #When to start reading the video file
'-i', VIDEO_FILENAME,
'-f', 'image2pipe',
'-pix_fmt', 'rgb24',
'-vcodec', 'rawvideo', '-']
pipe = subprocess.Popen(COMMAND,
stdout = subprocess.PIPE,
bufsize=10**8)
clarifai_api = ClarifaiApi("2Ocx2ZtBsi6zR_1FzFTLpafYICK5bRV0KhiA0fmQ", "xxsO3-1b9omh2wZ3JF1BrPe-IEuO0t5pFKgn3fs0")  # API key and secret are hardcoded here.
logging.basicConfig(
level = logging.INFO,
format = "[%(asctime)s] [%(process)d] [%(name)s] [%(levelname)s] [%(funcName)s] [line: %(lineno)s] - %(message)s",
stream = sys.stdout
)
log = logging.getLogger(name = "pipeline")
client = MongoClient('mongodb://127.0.0.1:3001/meteor')
db = client.meteor
danger_score_db = db.DangerScore
fs = gridfs.GridFS(db)
def image_data_to_file(image_data):
image_data = image_data.reshape((FRAME_HEIGHT, FRAME_WIDTH, 3))
image_file = Image.fromarray(image_data)
image_file.save(IMAGE_FILEPATH)
# plt.imshow(image_file)
# plt.show()
def determineRiskScore(result):
tags = result['results'][0]['result']['tag']
classes = tags['classes']
probabilities = tags['probs']
ratios= {}
for i in range(0, len(classes)):
ratios[classes[i]] = probabilities[i]
risk_factors = {"men":.1, "people":.1, "action":.3,"motion":.55, "danger":.5, "handgun":.8, "machine gun":.8, "weapon":.8, "risk": .3, "military":.3, "knife":.8, "blood":.5, "gun":.8, "sugery":1.0, "police":1.0, "hurry":.3}
risk_score = 0
for risk, value in risk_factors.items():
if risk in ratios:
risk_score += ratios[risk]*value
return risk_score
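# Worked example (illustration only): if Clarifai returned the tag probabilities
# {'gun': 0.9, 'people': 0.8}, the score would be 0.9*0.8 + 0.8*0.1 = 0.80.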
def update_database(risk_score, file_id):
# def update_database(risk_score):
if danger_score_db.find().count() > 0:
danger_score_db.update({"current":{"$exists":1}}, {"current":risk_score})
danger_score_db.update({"file_id":{"$exists":1}}, {"file_id":file_id})
else:
danger_score_db.insert({"current":risk_score})
danger_score_db.insert({"file_id":file_id})
def run():
number_of_frames_processed = 0
while True:
        raw_image = pipe.stdout.read(FRAME_HEIGHT * FRAME_WIDTH * 3)  # 3 bytes per pixel (RGB)
number_of_frames_processed += 1
if number_of_frames_processed % EVERY_NTH_FRAME == 0:
log.info("Processed frame {}".format(number_of_frames_processed))
image_data = numpy.fromstring(raw_image, dtype='uint8')
try:
image_data_to_file(image_data)
file_id = fs.put(open(IMAGE_FILEPATH, 'rb'))
result = clarifai_api.tag_images(open(IMAGE_FILEPATH, 'rb'))
risk_score = determineRiskScore(result)
log.info("Frame {} had result {} and score {}".format(number_of_frames_processed, result, risk_score))
update_database(risk_score, file_id)
# update_database(risk_score)
log.info("Updated database with risk score")
except Exception:
log.warning("Could not process image frame {}".format(number_of_frames_processed))
break
pipe.stdout.flush()
if __name__ == '__main__':
run()
# if capture.isOpened():
# print "yay"
# else:
# capture.open(VIDEO_FILENAME)
# # while True:
# # if capture.grab():
# # flag, frame = capture.retrieve()
# while capture.isOpened():
# ret, frame = capture.read()
# cv2.imshow('frame', frame)
# if cv2.waitKey(1000) and 0xFF == ord('q'):
# break
# result = clarifai_api.tag_images(frame)
# print result
#To release the video capture. Eventually, when this is running continuously, we'll probably want to kill the program periodically
# capture.release()
# cv2.destroyAllWindows()
| mit |
quheng/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integer.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
sravan-s/zeppelin | interpreter/lib/python/backend_zinline.py | 61 | 11831 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file provides a static (non-interactive) matplotlib plotting backend
# for zeppelin notebooks for use with the python/pyspark interpreters
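# Hypothetical usage sketch (hedged -- Zeppelin's interpreter normally wires this
# up automatically): a custom backend module is selected through matplotlib's
# standard "module://" mechanism, e.g.
#
#     import matplotlib
#     matplotlib.use('module://backend_zinline')   # before importing pyplot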
from __future__ import print_function
import sys
import uuid
import warnings
import base64
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import mpl_config
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg
from matplotlib.backend_bases import ShowBase, FigureManagerBase
from matplotlib.figure import Figure
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
class Show(ShowBase):
"""
A callable object that displays the figures to the screen. Valid kwargs
include figure width and height (in units supported by the div tag), block
(allows users to override blocking behavior regardless of whether or not
interactive mode is enabled, currently unused) and close (Implicitly call
matplotlib.pyplot.close('all') with each call to show()).
"""
def __call__(self, close=None, block=None, **kwargs):
if close is None:
close = mpl_config.get('close')
try:
managers = Gcf.get_all_fig_managers()
if not managers:
return
# Tell zeppelin that the output will be html using the %html magic
# We want to do this only once to avoid seeing "%html" printed
            # directly to the output when multiple figures are displayed from
# one paragraph.
if mpl_config.get('angular'):
print('%angular')
else:
print('%html')
# Show all open figures
for manager in managers:
manager.show(**kwargs)
finally:
# This closes all the figures if close is set to True.
if close and Gcf.get_all_fig_managers():
Gcf.destroy_all()
class FigureCanvasZInline(FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
"""
def get_bytes(self, **kwargs):
"""
Get the byte representation of the figure.
Should only be used with jpg/png formats.
"""
# Make sure format is correct
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt == 'svg':
raise ValueError("get_bytes() does not support svg, use png or jpg")
# Express the image as bytes
buf = BytesIO()
self.print_figure(buf, **kwargs)
fmt = fmt.encode()
if sys.version_info >= (3, 4) and sys.version_info < (3, 5):
byte_str = bytes("data:image/%s;base64," %fmt, "utf-8")
else:
byte_str = b"data:image/%s;base64," %fmt
byte_str += base64.b64encode(buf.getvalue())
# Python3 forces all strings to default to unicode, but for raster image
        # formats (e.g. png, jpg), we want to work with bytes. Thus this step is
        # needed to ensure compatibility for all python versions.
byte_str = byte_str.decode('ascii')
buf.close()
return byte_str
def get_svg(self, **kwargs):
"""
Get the svg representation of the figure.
Should only be used with svg format.
"""
# Make sure format is correct
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt != 'svg':
raise ValueError("get_svg() does not support png or jpg, use svg")
# For SVG the data string has to be unicode, not bytes
buf = StringIO()
self.print_figure(buf, **kwargs)
svg_str = buf.getvalue()
buf.close()
return svg_str
def draw_idle(self, *args, **kwargs):
"""
Called when the figure gets updated (eg through a plotting command).
This is overriden to allow open figures to be reshown after they
are updated when mpl_config.get('close') is False.
"""
if not self._is_idle_drawing:
with self._idle_draw_cntx():
self.draw(*args, **kwargs)
draw_if_interactive()
class FigureManagerZInline(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.fig_id = "figure_{0}".format(uuid.uuid4().hex)
self._shown = False
def angular_bind(self, **kwargs):
"""
Bind figure data to Zeppelin's Angular Object Registry.
If mpl_config("angular") is True and PY4J is supported, this allows
for the possibility to interactively update a figure from a separate
paragraph without having to display it multiple times.
"""
# This doesn't work for SVG so make sure it's not our format
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt == 'svg':
return
# Get the figure data as a byte array
src = self.canvas.get_bytes(**kwargs)
# Flag to determine whether or not to use
# zeppelin's angular display system
angular = mpl_config.get('angular')
# ZeppelinContext instance (requires PY4J)
context = mpl_config.get('context')
# Finally we must ensure that automatic closing is set to False,
# as otherwise using the angular display system is pointless
close = mpl_config.get('close')
# If above conditions are met, bind the figure data to
# the Angular Object Registry.
if not close and angular:
if hasattr(context, 'angularBind'):
# Binding is performed through figure ID to ensure this works
# if multiple figures are open
context.angularBind(self.fig_id, src)
# Zeppelin will automatically replace this value even if it
# is updated from another pargraph thanks to the {{}} notation
src = "{{%s}}" %self.fig_id
else:
warnings.warn("Cannot bind figure to Angular Object Registry. "
"Check if PY4J is installed.")
return src
def angular_unbind(self):
"""
Unbind figure from angular display system.
"""
context = mpl_config.get('context')
if hasattr(context, 'angularUnbind'):
context.angularUnbind(self.fig_id)
def destroy(self):
"""
Called when close=True or implicitly by pyplot.close().
Overriden to automatically clean up the angular object registry.
"""
self.angular_unbind()
def show(self, **kwargs):
if not self._shown:
zdisplay(self.canvas.figure, **kwargs)
else:
self.canvas.draw_idle()
self.angular_bind(**kwargs)
self._shown = True
def draw_if_interactive():
"""
If interactive mode is on, this allows for updating properties of
the figure when each new plotting command is called.
"""
manager = Gcf.get_active()
interactive = matplotlib.is_interactive()
angular = mpl_config.get('angular')
# Don't bother continuing if we aren't in interactive mode
# or if there are no active figures. Also pointless to continue
# in angular mode as we don't want to reshow the figure.
if not interactive or angular or manager is None:
return
# Allow for figure to be reshown if close is false since
# this function call implies that it has been updated
if not mpl_config.get('close'):
manager._shown = False
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this (and
# new_figure_manager_given_figure) is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasZInline(figure)
manager = FigureManagerZInline(canvas, num)
return manager
########################################################################
#
# Backend specific functions
#
########################################################################
def zdisplay(fig, **kwargs):
"""
Publishes a matplotlib figure to the notebook paragraph output.
"""
# kwargs can be width or height (in units supported by div tag)
width = kwargs.pop('width', 'auto')
height = kwargs.pop('height', 'auto')
fmt = kwargs.get('format', mpl_config.get('format'))
# Check if format is supported
supported_formats = mpl_config.get('supported_formats')
if fmt not in supported_formats:
raise ValueError("Unsupported format %s" %fmt)
# For SVG the data string has to be unicode, not bytes
if fmt == 'svg':
img = fig.canvas.get_svg(**kwargs)
# This is needed to ensure the SVG image is the correct size.
# We should find a better way to do this...
width = '{}px'.format(mpl_config.get('width'))
height = '{}px'.format(mpl_config.get('height'))
else:
# Express the image as bytes
src = fig.canvas.manager.angular_bind(**kwargs)
img = "<img src={src} style='width={width};height:{height}'>"
img = img.format(src=src, width=width, height=height)
# Print the image to the notebook paragraph via the %html magic
html = "<div style='width:{width};height:{height}'>{img}<div>"
print(html.format(width=width, height=height, img=img))
def displayhook():
"""
Called post paragraph execution if interactive mode is on
"""
if matplotlib.is_interactive():
show()
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
# Create a reference to the show function we are using. This is what actually
# gets called by matplotlib.pyplot.show().
show = Show()
# Default FigureCanvas and FigureManager classes to use from the backend
FigureCanvas = FigureCanvasZInline
FigureManager = FigureManagerZInline
| apache-2.0 |
zhenv5/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
sanketloke/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 73 | 6451 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of American
Statistical Ass., 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
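# Minimal usage sketch (hedged aside, independent of the benchmark below):
#
#     mcd = MinCovDet(random_state=0).fit(X)   # X: (n_samples, n_features) array
#     robust_location, robust_cov = mcd.location_, mcd.covariance_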
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
vshtanko/scikit-learn | sklearn/tests/test_common.py | 127 | 7665 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
            # Contrary to RidgeClassifierCV, LogisticRegressionCV uses actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators that expose a max_iter
    # attribute report an n_iter attribute of at least 1 after fitting.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
                    # Multitask models related to ENet cannot handle
                    # the case where y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
wangmiao1981/spark | python/pyspark/pandas/indexes/multi.py | 2 | 41734 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
from functools import partial
from typing import Any, Callable, Iterator, List, Optional, Tuple, Union, cast, no_type_check
import pandas as pd
from pandas.api.types import is_list_like
from pandas.api.types import is_hashable
from pyspark.sql import functions as F, Column, Window
from pyspark.sql.types import DataType
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas._typing import Scalar
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.missing.indexes import MissingPandasLikeMultiIndex
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.utils import (
compare_disallow_null,
is_name_like_tuple,
name_like_string,
scol_for,
verify_temp_column_name,
)
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.spark import functions as SF
class MultiIndex(Index):
"""
pandas-on-Spark MultiIndex that corresponds to pandas MultiIndex logically. This might hold
Spark Column internally.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : A single-level Index.
Examples
--------
>>> ps.DataFrame({'a': ['a', 'b', 'c']}, index=[[1, 2, 3], [4, 5, 6]]).index # doctest: +SKIP
MultiIndex([(1, 4),
(2, 5),
(3, 6)],
)
>>> ps.DataFrame({'a': [1, 2, 3]}, index=[list('abc'), list('def')]).index # doctest: +SKIP
MultiIndex([('a', 'd'),
('b', 'e'),
('c', 'f')],
)
"""
@no_type_check
def __new__(
cls,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
verify_integrity: bool = True,
) -> "MultiIndex":
if LooseVersion(pd.__version__) < LooseVersion("0.24"):
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
pidx = pd.MultiIndex(
levels=levels,
labels=codes,
sortorder=sortorder,
names=names,
dtype=dtype,
copy=copy,
name=name,
verify_integrity=verify_integrity,
)
else:
pidx = pd.MultiIndex(
levels=levels,
codes=codes,
sortorder=sortorder,
names=names,
dtype=dtype,
copy=copy,
name=name,
verify_integrity=verify_integrity,
)
return ps.from_pandas(pidx)
@property
def _internal(self) -> InternalFrame:
internal = self._psdf._internal
scol = F.struct(*internal.index_spark_columns)
return internal.copy(
column_labels=[None],
data_spark_columns=[scol],
data_fields=[None],
column_label_names=None,
)
@property
def _column_label(self) -> Optional[Tuple]:
return None
def __abs__(self) -> "MultiIndex":
raise TypeError("TypeError: cannot perform __abs__ with this index type: MultiIndex")
def _with_new_scol(
self, scol: Column, *, field: Optional[InternalField] = None
) -> "MultiIndex":
raise NotImplementedError("Not supported for type MultiIndex")
@no_type_check
def any(self, *args, **kwargs) -> None:
raise TypeError("cannot perform any with this index type: MultiIndex")
@no_type_check
def all(self, *args, **kwargs) -> None:
raise TypeError("cannot perform all with this index type: MultiIndex")
@staticmethod
def from_tuples(
tuples: List[Tuple],
sortorder: Optional[int] = None,
names: Optional[List[Union[Any, Tuple]]] = None,
) -> "MultiIndex":
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> ps.MultiIndex.from_tuples(tuples, names=('number', 'color')) # doctest: +SKIP
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
return cast(
MultiIndex,
ps.from_pandas(
pd.MultiIndex.from_tuples(tuples=tuples, sortorder=sortorder, names=names)
),
)
@staticmethod
def from_arrays(
arrays: List[List],
sortorder: Optional[int] = None,
names: Optional[List[Union[Any, Tuple]]] = None,
) -> "MultiIndex":
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays: list / sequence of array-likes
Each array-like gives one level’s value for each data point. len(arrays)
is the number of levels.
sortorder: int or None
Level of sortedness (must be lexicographically sorted by that level).
names: list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index: MultiIndex
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> ps.MultiIndex.from_arrays(arrays, names=('number', 'color')) # doctest: +SKIP
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
return cast(
MultiIndex,
ps.from_pandas(
pd.MultiIndex.from_arrays(arrays=arrays, sortorder=sortorder, names=names)
),
)
@staticmethod
def from_product(
iterables: List[List],
sortorder: Optional[int] = None,
names: Optional[List[Union[Any, Tuple]]] = None,
) -> "MultiIndex":
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> ps.MultiIndex.from_product([numbers, colors],
... names=['number', 'color']) # doctest: +SKIP
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
return cast(
MultiIndex,
ps.from_pandas(
pd.MultiIndex.from_product(iterables=iterables, sortorder=sortorder, names=names)
),
)
@staticmethod
def from_frame(df: DataFrame, names: Optional[List[Union[Any, Tuple]]] = None) -> "MultiIndex":
"""
Make a MultiIndex from a DataFrame.
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = ps.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df # doctest: +SKIP
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> ps.MultiIndex.from_frame(df) # doctest: +SKIP
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> ps.MultiIndex.from_frame(df, names=['state', 'observation']) # doctest: +SKIP
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
if not isinstance(df, DataFrame):
raise TypeError("Input must be a DataFrame")
sdf = df.to_spark()
if names is None:
names = df._internal.column_labels
elif not is_list_like(names):
raise TypeError("Names should be list-like for a MultiIndex")
else:
names = [name if is_name_like_tuple(name) else (name,) for name in names]
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in sdf.columns],
index_names=names,
)
return cast(MultiIndex, DataFrame(internal).index)
@property
def name(self) -> Union[Any, Tuple]:
raise PandasNotImplementedError(class_name="pd.MultiIndex", property_name="name")
@name.setter
def name(self, name: Union[Any, Tuple]) -> None:
raise PandasNotImplementedError(class_name="pd.MultiIndex", property_name="name")
def _verify_for_rename( # type: ignore[override]
self, name: List[Union[Any, Tuple]]
) -> List[Tuple]:
if is_list_like(name):
if self._internal.index_level != len(name):
raise ValueError(
"Length of new names must be {}, got {}".format(
self._internal.index_level, len(name)
)
)
if any(not is_hashable(n) for n in name):
raise TypeError("MultiIndex.name must be a hashable type")
return [n if is_name_like_tuple(n) else (n,) for n in name]
else:
raise TypeError("Must pass list-like as `names`.")
def swaplevel(self, i: int = -2, j: int = -1) -> "MultiIndex":
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex.
Examples
--------
>>> midx = ps.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names = ['word', 'number'])
>>> midx # doctest: +SKIP
MultiIndex([('a', 1),
('b', 2)],
names=['word', 'number'])
>>> midx.swaplevel(0, 1) # doctest: +SKIP
MultiIndex([(1, 'a'),
(2, 'b')],
names=['number', 'word'])
>>> midx.swaplevel('number', 'word') # doctest: +SKIP
MultiIndex([(1, 'a'),
(2, 'b')],
names=['number', 'word'])
"""
for index in (i, j):
if not isinstance(index, int) and index not in self.names:
raise KeyError("Level %s not found" % index)
i = i if isinstance(i, int) else self.names.index(i)
j = j if isinstance(j, int) else self.names.index(j)
for index in (i, j):
if index >= len(self.names) or index < -len(self.names):
raise IndexError(
"Too many levels: Index has only %s levels, "
"%s is not a valid level number" % (len(self.names), index)
)
index_map = list(
zip(
self._internal.index_spark_columns,
self._internal.index_names,
self._internal.index_fields,
)
)
index_map[i], index_map[j] = index_map[j], index_map[i]
index_spark_columns, index_names, index_fields = zip(*index_map)
internal = self._internal.copy(
index_spark_columns=list(index_spark_columns),
index_names=list(index_names),
index_fields=list(index_fields),
column_labels=[],
data_spark_columns=[],
data_fields=[],
)
return cast(MultiIndex, DataFrame(internal).index)
@property
def levshape(self) -> Tuple[int, ...]:
"""
A tuple with the length of each level.
Examples
--------
>>> midx = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> midx # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z')],
)
>>> midx.levshape
(3, 3)
"""
result = self._internal.spark_frame.agg(
*(F.countDistinct(c) for c in self._internal.index_spark_columns)
).collect()[0]
return tuple(result)
@staticmethod
def _comparator_for_monotonic_increasing(
data_type: DataType,
) -> Callable[[Column, Column, Callable[[Column, Column], Column]], Column]:
return compare_disallow_null
def _is_monotonic(self, order: str) -> bool:
if order == "increasing":
return self._is_monotonic_increasing().all()
else:
return self._is_monotonic_decreasing().all()
def _is_monotonic_increasing(self) -> Series:
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-1, -1)
cond = SF.lit(True)
has_not_null = SF.lit(True)
for scol in self._internal.index_spark_columns[::-1]:
data_type = self._internal.spark_type_for(scol)
prev = F.lag(scol, 1).over(window)
compare = MultiIndex._comparator_for_monotonic_increasing(data_type)
# Since pandas 1.1.4, null value is not allowed at any levels of MultiIndex.
            # Therefore, we should check `has_not_null` over all the levels.
has_not_null = has_not_null & scol.isNotNull()
cond = F.when(scol.eqNullSafe(prev), cond).otherwise(compare(scol, prev, Column.__gt__))
cond = has_not_null & (prev.isNull() | cond)
cond_name = verify_temp_column_name(
self._internal.spark_frame.select(self._internal.index_spark_columns),
"__is_monotonic_increasing_cond__",
)
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns + [cond.alias(cond_name)]
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_fields=self._internal.index_fields,
)
return first_series(DataFrame(internal))
@staticmethod
def _comparator_for_monotonic_decreasing(
data_type: DataType,
) -> Callable[[Column, Column, Callable[[Column, Column], Column]], Column]:
return compare_disallow_null
def _is_monotonic_decreasing(self) -> Series:
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-1, -1)
cond = SF.lit(True)
has_not_null = SF.lit(True)
for scol in self._internal.index_spark_columns[::-1]:
data_type = self._internal.spark_type_for(scol)
prev = F.lag(scol, 1).over(window)
            compare = MultiIndex._comparator_for_monotonic_decreasing(data_type)
# Since pandas 1.1.4, null value is not allowed at any levels of MultiIndex.
            # Therefore, we should check `has_not_null` over all the levels.
has_not_null = has_not_null & scol.isNotNull()
cond = F.when(scol.eqNullSafe(prev), cond).otherwise(compare(scol, prev, Column.__lt__))
cond = has_not_null & (prev.isNull() | cond)
cond_name = verify_temp_column_name(
self._internal.spark_frame.select(self._internal.index_spark_columns),
"__is_monotonic_decreasing_cond__",
)
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns + [cond.alias(cond_name)]
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_fields=self._internal.index_fields,
)
return first_series(DataFrame(internal))
def to_frame( # type: ignore[override]
self, index: bool = True, name: Optional[List[Union[Any, Tuple]]] = None
) -> DataFrame:
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of strings, optional
The passed names should substitute index level names.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
See Also
--------
DataFrame
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> idx = ps.MultiIndex.from_tuples(tuples, names=('number', 'color'))
>>> idx # doctest: +SKIP
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
>>> idx.to_frame() # doctest: +NORMALIZE_WHITESPACE
number color
number color
1 red 1 red
blue 1 blue
2 red 2 red
blue 2 blue
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
number color
0 1 red
1 1 blue
2 2 red
3 2 blue
To override the name of the resulting column, specify `name`:
>>> idx.to_frame(name=['n', 'c']) # doctest: +NORMALIZE_WHITESPACE
n c
number color
1 red 1 red
blue 1 blue
2 red 2 red
blue 2 blue
"""
if name is None:
name = [
name if name is not None else (i,)
for i, name in enumerate(self._internal.index_names)
]
elif is_list_like(name):
if len(name) != self._internal.index_level:
raise ValueError("'name' should have same length as number of levels on index.")
name = [n if is_name_like_tuple(n) else (n,) for n in name]
else:
raise TypeError("'name' must be a list / sequence of column names.")
return self._to_frame(index=index, names=name)
def to_pandas(self) -> pd.MultiIndex:
"""
Return a pandas MultiIndex.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'],
... index=[list('abcd'), list('efgh')])
>>> df['dogs'].index.to_pandas() # doctest: +SKIP
MultiIndex([('a', 'e'),
('b', 'f'),
('c', 'g'),
('d', 'h')],
)
"""
# TODO: We might need to handle internal state change.
# So far, we don't have any functions to change the internal state of MultiIndex except for
# series-like operations. In that case, it creates new Index object instead of MultiIndex.
return super().to_pandas()
def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:
raise NotImplementedError("nunique is not defined for MultiIndex")
# TODO: add 'name' parameter after pd.MultiIndex.name is implemented
def copy(self, deep: Optional[bool] = None) -> "MultiIndex": # type: ignore[override]
"""
Make a copy of this object.
Parameters
----------
deep : None
            this parameter is not supported; it is just a dummy parameter to match pandas.
Examples
--------
>>> df = ps.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'],
... index=[list('abcd'), list('efgh')])
>>> df['dogs'].index # doctest: +SKIP
MultiIndex([('a', 'e'),
('b', 'f'),
('c', 'g'),
('d', 'h')],
)
Copy index
>>> df.index.copy() # doctest: +SKIP
MultiIndex([('a', 'e'),
('b', 'f'),
('c', 'g'),
('d', 'h')],
)
"""
return super().copy(deep=deep) # type: ignore
def symmetric_difference( # type: ignore[override]
self,
other: Index,
result_name: Optional[List[Union[Any, Tuple]]] = None,
sort: Optional[bool] = None,
) -> "MultiIndex":
"""
Compute the symmetric difference of two MultiIndex objects.
Parameters
----------
other : Index or array-like
result_name : list
sort : True or None, default None
Whether to sort the resulting index.
* True : Attempt to sort the result.
* None : Do not sort the result.
Returns
-------
        symmetric_difference : MultiIndex
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> midx1 = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 0, 0, 0, 1, 2, 0, 1, 2]])
>>> midx2 = pd.MultiIndex([['pandas-on-Spark', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 0, 0, 0, 1, 2, 0, 1, 2]])
>>> s1 = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx1)
>>> s2 = ps.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
... index=midx2)
>>> s1.index.symmetric_difference(s2.index) # doctest: +SKIP
MultiIndex([('pandas-on-Spark', 'speed'),
( 'lama', 'speed')],
)
You can set names of result Index.
>>> s1.index.symmetric_difference(s2.index, result_name=['a', 'b']) # doctest: +SKIP
MultiIndex([('pandas-on-Spark', 'speed'),
( 'lama', 'speed')],
names=['a', 'b'])
You can set sort to `True`, if you want to sort the resulting index.
>>> s1.index.symmetric_difference(s2.index, sort=True) # doctest: +SKIP
MultiIndex([('pandas-on-Spark', 'speed'),
( 'lama', 'speed')],
)
You can also use the ``^`` operator:
>>> s1.index ^ s2.index # doctest: +SKIP
MultiIndex([('pandas-on-Spark', 'speed'),
( 'lama', 'speed')],
)
"""
if type(self) != type(other):
raise NotImplementedError(
"Doesn't support symmetric_difference between Index & MultiIndex for now"
)
sdf_self = self._psdf._internal.spark_frame.select(self._internal.index_spark_columns)
sdf_other = other._psdf._internal.spark_frame.select(other._internal.index_spark_columns)
sdf_symdiff = sdf_self.union(sdf_other).subtract(sdf_self.intersect(sdf_other))
if sort:
sdf_symdiff = sdf_symdiff.sort(*self._internal.index_spark_columns)
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf_symdiff,
index_spark_columns=[
scol_for(sdf_symdiff, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
)
result = cast(MultiIndex, DataFrame(internal).index)
if result_name:
result.names = result_name
return result
# TODO: ADD error parameter
def drop(
self, codes: List[Any], level: Optional[Union[int, Any, Tuple]] = None
) -> "MultiIndex":
"""
Make new MultiIndex with passed list of labels deleted
Parameters
----------
codes : array-like
Must be a list of tuples
level : int or level name, default None
Returns
-------
dropped : MultiIndex
Examples
--------
>>> index = ps.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> index # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z')],
)
>>> index.drop(['a']) # doctest: +SKIP
MultiIndex([('b', 'y'),
('c', 'z')],
)
>>> index.drop(['x', 'y'], level=1) # doctest: +SKIP
MultiIndex([('c', 'z')],
)
"""
internal = self._internal.resolved_copy
sdf = internal.spark_frame
index_scols = internal.index_spark_columns
if level is None:
scol = index_scols[0]
elif isinstance(level, int):
scol = index_scols[level]
else:
scol = None
for index_spark_column, index_name in zip(
internal.index_spark_columns, internal.index_names
):
if not isinstance(level, tuple):
level = (level,)
if level == index_name:
if scol is not None:
raise ValueError(
"The name {} occurs multiple times, use a level number".format(
name_like_string(level)
)
)
scol = index_spark_column
if scol is None:
raise KeyError("Level {} not found".format(name_like_string(level)))
sdf = sdf[~scol.isin(codes)]
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in internal.index_spark_column_names],
index_names=internal.index_names,
index_fields=internal.index_fields,
column_labels=[],
data_spark_columns=[],
data_fields=[],
)
return cast(MultiIndex, DataFrame(internal).index)
def argmax(self) -> None:
raise TypeError("reduction operation 'argmax' not allowed for this dtype")
def argmin(self) -> None:
raise TypeError("reduction operation 'argmin' not allowed for this dtype")
def asof(self, label: Any) -> None:
raise NotImplementedError(
"only the default get_loc method is currently supported for MultiIndex"
)
@property
def is_all_dates(self) -> bool:
"""
is_all_dates always returns False for MultiIndex
Examples
--------
>>> from datetime import datetime
>>> idx = ps.MultiIndex.from_tuples(
... [(datetime(2019, 1, 1, 0, 0, 0), datetime(2019, 1, 1, 0, 0, 0)),
... (datetime(2019, 1, 1, 0, 0, 0), datetime(2019, 1, 1, 0, 0, 0))])
>>> idx # doctest: +SKIP
MultiIndex([('2019-01-01', '2019-01-01'),
('2019-01-01', '2019-01-01')],
)
>>> idx.is_all_dates
False
"""
return False
def __getattr__(self, item: str) -> Any:
if hasattr(MissingPandasLikeMultiIndex, item):
property_or_func = getattr(MissingPandasLikeMultiIndex, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError("'MultiIndex' object has no attribute '{}'".format(item))
def _get_level_number(self, level: Union[int, Any, Tuple]) -> int:
"""
Return the level number if a valid level is given.
"""
count = self.names.count(level)
if (count > 1) and not isinstance(level, int):
raise ValueError("The name %s occurs multiple times, use a level number" % level)
if level in self.names:
level = self.names.index(level)
elif isinstance(level, int):
nlevels = self.nlevels
if level >= nlevels:
raise IndexError(
"Too many levels: Index has only %d "
"levels, %d is not a valid level number" % (nlevels, level)
)
if level < 0:
if (level + nlevels) < 0:
raise IndexError(
"Too many levels: Index has only %d levels, "
"not %d" % (nlevels, level + 1)
)
level = level + nlevels
else:
raise KeyError("Level %s not found" % str(level))
return level
def get_level_values(self, level: Union[int, Any, Tuple]) -> Index:
"""
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
Values is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
--------
Create a MultiIndex:
>>> mi = ps.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'a')])
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['x', 'x', 'y'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['a', 'b', 'a'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
index_scol = self._internal.index_spark_columns[level]
index_name = self._internal.index_names[level]
index_field = self._internal.index_fields[level]
internal = self._internal.copy(
index_spark_columns=[index_scol],
index_names=[index_name],
index_fields=[index_field],
column_labels=[],
data_spark_columns=[],
data_fields=[],
)
return DataFrame(internal).index
def insert(self, loc: int, item: Any) -> Index:
"""
Make new MultiIndex inserting new item at location.
Follows Python list.append semantics for negative values.
Parameters
----------
loc : int
item : object
Returns
-------
new_index : MultiIndex
Examples
--------
>>> psmidx = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
>>> psmidx.insert(3, ("h", "j")) # doctest: +SKIP
MultiIndex([('a', 'x'),
('b', 'y'),
('c', 'z'),
('h', 'j')],
)
For negative values
>>> psmidx.insert(-2, ("h", "j")) # doctest: +SKIP
MultiIndex([('a', 'x'),
('h', 'j'),
('b', 'y'),
('c', 'z')],
)
"""
length = len(self)
if loc < 0:
loc = loc + length
if loc < 0:
raise IndexError(
"index {} is out of bounds for axis 0 with size {}".format(
(loc - length), length
)
)
else:
if loc > length:
raise IndexError(
"index {} is out of bounds for axis 0 with size {}".format(loc, length)
)
index_name = [
(name,) for name in self._internal.index_spark_column_names
] # type: List[Tuple]
sdf_before = self.to_frame(name=index_name)[:loc].to_spark()
sdf_middle = Index([item]).to_frame(name=index_name).to_spark()
sdf_after = self.to_frame(name=index_name)[loc:].to_spark()
sdf = sdf_before.union(sdf_middle).union(sdf_after)
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
)
return DataFrame(internal).index
def item(self) -> Tuple[Scalar, ...]:
"""
Return the first element of the underlying data as a python tuple.
Returns
-------
tuple
The first element of MultiIndex.
Raises
------
ValueError
If the data is not length-1.
Examples
--------
>>> psmidx = ps.MultiIndex.from_tuples([('a', 'x')])
>>> psmidx.item()
('a', 'x')
"""
return self._psdf.head(2)._to_internal_pandas().index.item()
def intersection(self, other: Union[DataFrame, Series, Index, List]) -> "MultiIndex":
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : MultiIndex
Examples
--------
>>> midx1 = ps.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
>>> midx2 = ps.MultiIndex.from_tuples([("c", "z"), ("d", "w")])
>>> midx1.intersection(midx2).sort_values() # doctest: +SKIP
MultiIndex([('c', 'z')],
)
"""
if isinstance(other, Series) or not is_list_like(other):
raise TypeError("other must be a MultiIndex or a list of tuples")
elif isinstance(other, DataFrame):
raise ValueError("Index data must be 1-dimensional")
elif isinstance(other, MultiIndex):
spark_frame_other = other.to_frame().to_spark()
keep_name = self.names == other.names
elif isinstance(other, Index):
# Always returns an empty MultiIndex if `other` is Index.
return self.to_frame().head(0).index # type: ignore
elif not all(isinstance(item, tuple) for item in other):
raise TypeError("other must be a MultiIndex or a list of tuples")
else:
spark_frame_other = MultiIndex.from_tuples(list(other)).to_frame().to_spark()
keep_name = True
default_name = [SPARK_INDEX_NAME_FORMAT(i) for i in range(self.nlevels)] # type: List
spark_frame_self = self.to_frame(name=default_name).to_spark()
spark_frame_intersected = spark_frame_self.intersect(spark_frame_other)
if keep_name:
index_names = self._internal.index_names
else:
index_names = None
internal = InternalFrame( # TODO: dtypes?
spark_frame=spark_frame_intersected,
index_spark_columns=[scol_for(spark_frame_intersected, col) for col in default_name],
index_names=index_names,
)
return cast(MultiIndex, DataFrame(internal).index)
@property
def hasnans(self) -> bool:
raise NotImplementedError("hasnans is not defined for MultiIndex")
@property
def inferred_type(self) -> str:
"""
Return a string of the type inferred from the values.
"""
# Always returns "mixed" for MultiIndex
return "mixed"
@property
def asi8(self) -> None:
"""
Integer representation of the values.
"""
# Always returns None for MultiIndex
return None
def factorize(
self, sort: bool = True, na_sentinel: Optional[int] = -1
) -> Tuple["MultiIndex", pd.Index]:
return MissingPandasLikeMultiIndex.factorize(self, sort=sort, na_sentinel=na_sentinel)
def __iter__(self) -> Iterator:
return MissingPandasLikeMultiIndex.__iter__(self)
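# --- Illustrative sketch (not part of the original module): a minimal
# end-to-end example tying together a few of the methods defined above
# (from_tuples, swaplevel, get_level_values).  It assumes a running Spark
# session, as the doctests do; the index values are arbitrary.
def _example_multiindex_usage():
    midx = ps.MultiIndex.from_tuples(
        [("a", "x"), ("b", "y"), ("c", "z")], names=["outer", "inner"]
    )
    # Swapping levels keeps each tuple's pairing; only the level order changes.
    swapped = midx.swaplevel("outer", "inner")
    # A single level can be extracted as a plain Index.
    outer = midx.get_level_values("outer")
    return swapped, outer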
def _test() -> None:
import os
import doctest
import sys
import numpy
from pyspark.sql import SparkSession
import pyspark.pandas.indexes.multi
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.indexes.multi.__dict__.copy()
globs["np"] = numpy
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.indexes.multi tests")
.getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.indexes.multi,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
wangyum/spark | python/setup.py | 2 | 12728 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util
import glob
import os
import sys
from setuptools import setup
from setuptools.command.install import install
from shutil import copyfile, copytree, rmtree
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
try:
spec = importlib.util.spec_from_file_location("install", "pyspark/install.py")
install_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(install_module)
except IOError:
print("Failed to load the installing module (pyspark/install.py) which had to be "
"packaged together.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
USER_SCRIPTS_PATH = os.path.join(SPARK_HOME, "sbin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
USER_SCRIPTS_TARGET = os.path.join(TEMP_PATH, "sbin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm already exists while we are under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/pandas/utils.py
# For Arrow, you should also check ./pom.xml and ensure there are no breaking changes in the
# binary format protocol with the Java version, see ARROW_HOME/format/* for specifications.
# Also don't forget to update python/docs/source/getting_started/install.rst.
_minimum_pandas_version = "0.23.2"
_minimum_pyarrow_version = "1.0.0"
class InstallCommand(install):
# TODO(SPARK-32837) leverage pip's custom options
def run(self):
install.run(self)
# Make sure the destination is always clean.
spark_dist = os.path.join(self.install_lib, "pyspark", "spark-distribution")
rmtree(spark_dist, ignore_errors=True)
if ("PYSPARK_HADOOP_VERSION" in os.environ) or ("PYSPARK_HIVE_VERSION" in os.environ):
            # Note that the PYSPARK_VERSION environment variable is just for testing purposes.
# PYSPARK_HIVE_VERSION environment variable is also internal for now in case
# we support another version of Hive in the future.
spark_version, hadoop_version, hive_version = install_module.checked_versions(
os.environ.get("PYSPARK_VERSION", VERSION).lower(),
os.environ.get("PYSPARK_HADOOP_VERSION", install_module.DEFAULT_HADOOP).lower(),
os.environ.get("PYSPARK_HIVE_VERSION", install_module.DEFAULT_HIVE).lower())
if ("PYSPARK_VERSION" not in os.environ and
((install_module.DEFAULT_HADOOP, install_module.DEFAULT_HIVE) ==
(hadoop_version, hive_version))):
# Do not download and install if they are same as default.
return
install_module.install_spark(
dest=spark_dist,
spark_version=spark_version,
hadoop_version=hadoop_version,
hive_version=hive_version)
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(USER_SCRIPTS_PATH, USER_SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
with open('README.md') as f:
long_description = f.read()
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
long_description_content_type="text/markdown",
author='Spark Developers',
author_email='[email protected]',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.cloudpickle',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.sql.avro',
'pyspark.sql.pandas',
'pyspark.streaming',
'pyspark.bin',
'pyspark.sbin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.resource',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.sbin': 'deps/sbin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.sbin': ['spark-config.sh', 'spark-daemon.sh',
'start-history-server.sh',
'stop-history-server.sh', ],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
# Don't forget to update python/docs/source/getting_started/install.rst
# if you're updating the versions or dependencies.
install_requires=['py4j==0.10.9.2'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
]
},
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Typing :: Typed'],
cmdclass={
'install': InstallCommand,
},
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "sbin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "sbin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
nelson-liu/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 73 | 2264 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
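# --- Illustrative sketch (not part of the original exercise): a quick way to
# see what the character n-gram analyzer used above actually extracts.  Fit
# the same kind of vectorizer on a tiny corpus and inspect its features; the
# sample strings are arbitrary.
def _show_char_ngrams():
    demo = TfidfVectorizer(ngram_range=(1, 3), analyzer='char', use_idf=False)
    demo.fit([u'bonjour', u'hello'])
    # Each feature is a sequence of 1 to 3 consecutive characters,
    # e.g. 'b', 'bo', 'bon', 'o', 'on', 'onj', ...
    return demo.get_feature_names()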
| bsd-3-clause |