repo_name | path | copies | size | content | license
---|---|---|---|---|---|
ttthy1/2017sejongAI | week11/category_predictor2.py | 1 | 1761 | # import the required packages
from sklearn.datasets import fetch_20newsgroups
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
# define the category map
category_map = {'talk.politics.misc': 'Politics', 'rec.autos': 'Autos', 'rec.sport.hockey': 'Hockey', 'sci.electronics': 'Electronics', 'sci.med': 'Medicine'}
# fetch the training dataset
training_data = fetch_20newsgroups(subset='train', categories=category_map.keys(), shuffle=True, random_state=5)
# extract term counts using a CountVectorizer object
count_vectorizer = CountVectorizer()
train_tc = count_vectorizer.fit_transform(training_data.data)
print("\nDimensions of training data:", train_tc.shape)
# create the tf-idf transformer
tfidf = TfidfTransformer()
train_tfidf = tfidf.fit_transform(train_tc)
# define the test data
input_data = [
'You need to be careful with cars when you are driving on slippery roads', 'A lot of devices can be operated wirelessly',
'Players need to be careful when they are close to goal posts',
'Political debates help us understand the perspectives of both sides'
]
# train a multinomial naive Bayes classifier
classifier = MultinomialNB().fit(train_tfidf, training_data.target)
# transform the input data using the count vectorizer
input_tc = count_vectorizer.transform(input_data)
# transform the count vectors using the tf-idf transformer
input_tfidf = tfidf.transform(input_tc)
# predict the categories
predictions = classifier.predict(input_tfidf)
# print the results
for sent, category in zip(input_data, predictions):
print('\nInput:', sent, '\nPredicted category:', \
category_map[training_data.target_names[category]])
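# Illustrative addition (not part of the original script): the same
# count -> tf-idf -> naive Bayes chain can be expressed with sklearn's Pipeline,
# which keeps the vectorizer, transformer and classifier fitted together.
from sklearn.pipeline import Pipeline
pipeline = Pipeline([('counts', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('nb', MultinomialNB())])
pipeline.fit(training_data.data, training_data.target)
for sent, category in zip(input_data, pipeline.predict(input_data)):
    print('\nPipeline prediction:', sent, '->',
          category_map[training_data.target_names[category]])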
| gpl-3.0 |
brummer-simon/RIOT | tests/pkg_emlearn/generate_digit.py | 11 | 1304 | #!/usr/bin/env python3
"""Generate a binary file from a sample image of the MNIST dataset.
Pixel of the sample are stored as float32, images have size 8x8.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import datasets
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
output_path = os.path.join(SCRIPT_DIR, args.output)
digits = datasets.load_digits()
rnd = 42
_, data, _, _ = train_test_split(digits.data, digits.target,
random_state=rnd)
data = data[args.index]
np.ndarray.tofile(data.astype('float32'), output_path)
if args.no_plot is False:
plt.gray()
plt.imshow(data.reshape(8, 8))
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in MNIST test dataset")
parser.add_argument("-o", "--output", type=str, default='digit',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
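# Illustrative usage note (added, not in the original script): the generated
# file holds 64 float32 values and can be loaded back and reshaped, e.g.
#   pixels = np.fromfile('digit', dtype='float32').reshape(8, 8)
# where 'digit' is the default output filename set above.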
| lgpl-2.1 |
MTG/sms-tools | software/models_interface/hprModel_function.py | 1 | 3588 | # function to call the main analysis/synthesis functions in software/models/hprModel.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import hprModel as HPR
import stft as STFT
def main(inputFile='../../sounds/sax-phrase-short.wav', window='blackman', M=601, N=1024, t=-100,
minSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01):
"""
Perform analysis/synthesis using the harmonic plus residual model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size; N: fft size (power of two, greater than or equal to M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics; minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound; f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# find harmonics and residual
hfreq, hmag, hphase, xr = HPR.hprModelAnal(x, fs, w, N, H, t, minSineDur, nH, minf0, maxf0, f0et, harmDevSlope)
# compute spectrogram of residual
mXr, pXr = STFT.stftAnal(xr, w, N, H)
# synthesize hpr model
y, yh = HPR.hprModelSynth(hfreq, hmag, hphase, xr, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel_sines.wav'
outputFileResidual = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel_residual.wav'
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel.wav'
# write sounds files for harmonics, residual, and the sum
UF.wavwrite(yh, fs, outputFileSines)
UF.wavwrite(xr, fs, outputFileResidual)
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(9, 6))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the magnitude spectrogram of residual
plt.subplot(3,1,2)
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))
plt.autoscale(tight=True)
# plot harmonic frequencies on residual spectrogram
if (hfreq.shape[1] > 0):
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time(s)')
plt.ylabel('frequency(Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + residual spectrogram')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.ion()
plt.show()
if __name__ == "__main__":
main()
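# Illustrative usage note (added; the alternative parameter values below are an
# assumption, any monophonic 44100 Hz wav works): analysis settings can be
# overridden per call, e.g.
#   main(inputFile='../../sounds/sax-phrase-short.wav', window='hamming',
#        M=801, N=1024, minf0=300, maxf0=800)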
| agpl-3.0 |
yudingding6197/fin_script | debug/sync_cixin.py | 1 | 1186 | #!/usr/bin/env python
# -*- coding:gbk -*-
import sys
import os
import pandas as pd
from openpyxl import Workbook
from openpyxl.reader.excel import load_workbook
# Read the newest cixin_***.xlsx file under ../data/entry/cixin/ to get the sub-new ("cixin") stocks that have not yet opened
# First run cixin_data.py manually to get the latest CX info, then parse it from there
# Main
cx_path = "../data/entry/_no_open_cx.txt"
path = "../data/entry/cixin/"
file = ''
for (dirpath, dirnames, filenames) in os.walk(path):
if len(filenames)>0:
file = filenames[-1]
#print('dirpath = ' + dirpath)
if file=='':
print "Not find file in path:", path
exit(0)
yzcx_df = pd.DataFrame()
sheet_st = 'Sheet'
wb = load_workbook(path+file)
ws = wb.get_sheet_by_name(sheet_st)
code_list = []
for rx in range(2,ws.max_row+1):
w1 = ws.cell(row = rx, column = 1).value
w2 = ws.cell(row = rx, column = 2).value
w3 = ws.cell(row = rx, column = 3).value
w4 = ws.cell(row = rx, column = 4).value
if int(w4)==0:
temp_list = [w1,w2,w3,w4]
df1 = pd.DataFrame([temp_list])
yzcx_df = yzcx_df.append(df1)
code_list.append(w1)
if len(code_list)==0:
print "No CIXIN List"
exit(0)
cx_file = open(cx_path, 'w')
for code in code_list:
cx_file.write(code + "\n")
cx_file.close()
| gpl-2.0 |
abhishekgahlot/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 45 | 5463 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
l_x = float(l_x)
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36  # integer: used below as a size argument to rs.rand
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
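# Illustrative addition (not part of the original example): quantify the claim
# made in the docstring by counting mislabeled pixels after thresholding each
# reconstruction at 0.5.
err_l2 = np.mean((rec_l2 > 0.5) != data)
err_l1 = np.mean((rec_l1 > 0.5) != data)
print("Fraction of mislabeled pixels - L2: %.4f, L1: %.4f" % (err_l2, err_l1))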
| bsd-3-clause |
lizardsystem/lizard-damage | lizard_damage/results.py | 1 | 10390 | """Process results for a DamageEvent.
The idea is that during a calculation a ResultCollector object is kept
around, and generated results (like land use images for a given tile) can
be "thrown to" it."""
import glob
import os
import shutil
import subprocess
import tempfile
import zipfile
from PIL import Image
from pyproj import Proj
import matplotlib as mpl
import numpy as np
ZIP_FILENAME = 'result.zip'
RD = str(
"+proj=sterea +lat_0=52.15616055555555 +lon_0=5.38763888888889 +k=0.999908"
" +x_0=155000 +y_0=463000 +ellps=bessel +units=m +towgs84=565.2369,"
"50.0087,465.658,-0.406857330322398,0.350732676542563,-1.8703473836068,"
"4.0812 +no_defs <>"
)
WGS84 = str('+proj=latlong +datum=WGS84')
rd_proj = Proj(RD)
wgs84_proj = Proj(WGS84)
CDICT_HEIGHT = {
'red': ((0.0, 51. / 256, 51. / 256),
(0.5, 237. / 256, 237. / 256),
(1.0, 83. / 256, 83. / 256)),
'green': ((0.0, 114. / 256, 114. / 256),
(0.5, 245. / 256, 245. / 256),
(1.0, 83. / 256, 83. / 256)),
'blue': ((0.0, 54. / 256, 54. / 256),
(0.5, 170. / 256, 170. / 256),
(1.0, 83. / 256, 83. / 256)),
}
CDICT_WATER_DEPTH = {
'red': ((0.0, 170. / 256, 170. / 256),
(0.5, 65. / 256, 65. / 256),
(1.0, 4. / 256, 4. / 256)),
'green': ((0.0, 200. / 256, 200. / 256),
(0.5, 120. / 256, 120. / 256),
(1.0, 65. / 256, 65. / 256)),
'blue': ((0.0, 255. / 256, 255. / 256),
(0.5, 221. / 256, 221. / 256),
(1.0, 176. / 256, 176. / 256)),
}
class ResultCollector(object):
def __init__(self, workdir, all_leaves, logger):
"""Start a new ResultCollector.
Workdir is a damage event's workdir. All result files are placed
in that directory, or subdirectories of it.
all_leaves is an iterable of (ahn_name, extent) tuples that
is mainly used to know what the entire extent is going to be
in advance.
All files are placed in the damage event's directory.
Results that are tracked:
- Files to be added to a result zipfile
- Landuse tiles
- Water depth tiles
- Height tiles
- Damage tiles.
The damage tiles are added as ASC's to the result zipfile.
All four tile types are saved as images for display with Google Maps.
The damage tiles are somewhat special in that they will first be
saved, and need to have roads drawn in them afterwards.
"""
self.workdir = workdir
self.tempdir = os.path.join(self.workdir, 'tmp')
if not os.path.exists(self.tempdir):
os.makedirs(self.tempdir)
self.logger = logger
# We want to know all leaves in advance, so we can make images for
# the entire region, or sections of it, without having to let them
# correspond 1:1 to the tiles.
self.all_leaves = {
ahn_name: extent for (ahn_name, extent) in all_leaves
}
self.riskmap_data = []
# Create an empty zipfile, throw away the old one if needed.
self.zipfile = mk(self.workdir, ZIP_FILENAME)
if os.path.exists(self.zipfile):
os.remove(self.zipfile)
self.mins = {'depth': float("+inf"), 'height': float("+inf")}
self.maxes = {'depth': float("-inf"), 'height': float("-inf")}
def png_path(self, result_type, tile):
return mk(self.workdir, result_type, "{}.png".format(tile))
def save_ma(
self, tile, masked_array, result_type, ds_template=None,
repetition_time=None):
# self.save_ma_to_geoimage(tile, masked_array, result_type)
# ^^^ disable because google maps api no longer supports this,
# and because tmp takes excessive space because of this
# (uncompressed) storage.
if result_type == 'damage':
filename = self.save_ma_to_asc(
tile, masked_array, result_type, ds_template, repetition_time)
if repetition_time is not None:
# TODO (Reinout wants to know where this is used. The file is
# deleted after adding it to the zipfile, so....)
self.riskmap_data.append(
(tile, repetition_time, filename))
def save_ma_to_asc(
self, tile, masked_array, result_type, ds_template,
repetition_time):
from lizard_damage import calc
if repetition_time is not None:
filename = 'schade_{}_T{}.asc'.format(tile, repetition_time)
else:
filename = 'schade_{}.asc'.format(tile)
filename = os.path.join(self.tempdir, filename)
calc.write_result(
name=filename,
ma_result=masked_array,
ds_template=ds_template)
return filename
def save_csv_data_for_zipfile(self, zipname, csvdata):
from lizard_damage import calc
filename = calc.mkstemp_and_close()
calc.write_table(name=filename, **csvdata)
self.save_file_for_zipfile(filename, zipname, delete_after=True)
def save_file_for_zipfile(self, file_path, zipname, delete_after=False):
with zipfile.ZipFile(self.zipfile, 'a', zipfile.ZIP_DEFLATED) as myzip:
self.logger.info('zipping %s...' % zipname)
myzip.write(file_path, zipname)
if delete_after:
self.logger.info(
'removing %r (%s in arc)' % (file_path, zipname))
os.remove(file_path)
def build_damage_geotiff(self):
orig_dir = os.getcwd()
os.chdir(self.tempdir)
asc_files = glob.glob('*.asc')
if not asc_files:
self.logger.info(
"No asc files as input, not writing out a geotiff.")
for asc_file in asc_files:
tiff_file = asc_file.replace('.asc', '.tiff')
cmd = ("gdal_translate %s %s "
"-co compress=deflate -co tiled=yes "
"-ot float32 -a_srs EPSG:28992")
os.system(cmd % (asc_file, tiff_file))
self.save_file_for_zipfile(tiff_file, tiff_file)
file_with_tiff_filenames = tempfile.NamedTemporaryFile()
tiff_files = glob.glob('*.tiff')
for tiff_file in tiff_files:
file_with_tiff_filenames.write(tiff_file + "\n")
file_with_tiff_filenames.flush()
vrt_file = 'schade.vrt'
cmd = "gdalbuildvrt -input_file_list %s %s" % (
file_with_tiff_filenames.name, vrt_file)
self.logger.debug(cmd)
os.system(cmd)
file_with_tiff_filenames.close() # Deletes the temporary file
if os.path.exists(vrt_file):
self.save_file_for_zipfile(vrt_file, vrt_file)
os.chdir(orig_dir)
def finalize(self):
"""Make final version of the data:
- Warp all generated geoimages to WGS84.
"""
self.extents = {}
for tile in self.all_leaves:
for result_type in ('height', 'depth'):
tmp_filename = os.path.join(
self.tempdir, "{}.{}".format(tile, result_type))
if os.path.exists(tmp_filename):
masked_array = np.load(tmp_filename)
os.remove(tmp_filename)
normalize = mpl.colors.Normalize(
vmin=self.mins[result_type],
vmax=self.maxes[result_type])
if result_type == 'height':
cdict = CDICT_HEIGHT
elif result_type == 'depth':
cdict = CDICT_WATER_DEPTH
colormap = mpl.colors.LinearSegmentedColormap(
'something', cdict, N=1024)
rgba = colormap(normalize(masked_array), bytes=True)
if result_type == 'depth':
rgba[:, :, 3] = np.where(
np.greater(masked_array.filled(0), 0), 255, 0)
filename = self.png_path(result_type, tile)
Image.fromarray(rgba).save(filename, 'PNG')
write_extent_pgw(filename.replace('.png', '.pgw'),
self.all_leaves[tile])
for result_type in ('damage', 'landuse', 'height', 'depth'):
png = self.png_path(result_type, tile)
if os.path.exists(png):
result_extent = rd_to_wgs84(png)
self.extents[(tile, result_type)] = result_extent
def cleanup_tmp_dir(self):
shutil.rmtree(self.tempdir)
def all_images(self):
"""Generate path and extent of all created images. Path is relative
to the workdir. Only use after finalizing."""
for ((tile, result_type), extent) in self.extents.items():
png_path = self.png_path(result_type, tile)
if os.path.exists(png_path):
relative = png_path[len(self.workdir):]
yield (result_type, relative, extent)
def write_extent_pgw(name, extent):
"""write pgw file:
0.5
0.000
0.000
-0.5
<x ul corner>
<y ul corner>
extent is a 4-tuple
"""
f = open(name, 'w')
f.write('0.5\n0.000\n0.000\n-0.5\n')
f.write('%f\n%f' % (min(extent[0], extent[2]), max(extent[1], extent[3])))
f.close()
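# For reference (illustrative values, not from the source): for an extent of
# (120000, 487000, 121000, 488000) the resulting world file contains the six
# lines 0.5 / 0.000 / 0.000 / -0.5 / 120000.000000 / 488000.000000.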
def mk(*parts):
"""Combine parts using os.path.join, then make sure the directory
exists."""
path = os.path.join(*parts)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
return path
def rd_to_wgs84(png):
from lizard_damage import models
# Step 1: warp using gdalwarp to lon/lat in .tif
# Warp png file, output is tif.
tif = png.replace('.png', '.tif')
subprocess.call([
'gdalwarp', png, tif,
'-t_srs', "+proj=latlong +datum=WGS84", '-s_srs', RD.strip()])
# Step 2: convert .tif back to .png
im = Image.open(tif)
im.save(png, 'PNG')
# Step 3: We can't save this WGS84 as a PGW (or at least, we don't).
# Remove the old PGW and return this extent.
result_extent = models.extent_from_geotiff(tif)
os.remove(png.replace('.png', '.pgw'))
# Step 4: remove TIF
os.remove(tif)
return result_extent
| gpl-3.0 |
smmribeiro/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_vars.py | 7 | 26282 | """ pydevd_vars deals with variables:
resolution/conversion to XML.
"""
import math
import pickle
from _pydev_bundle.pydev_imports import quote
from _pydev_imps._pydev_saved_modules import thread
from _pydevd_bundle.pydevd_constants import get_frame, get_current_thread_id, xrange, NUMPY_NUMERIC_TYPES, NUMPY_FLOATING_POINT_TYPES
from _pydevd_bundle.pydevd_custom_frames import get_custom_frame
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys # @Reimport
try:
from collections import OrderedDict
except:
OrderedDict = dict
from _pydev_imps._pydev_saved_modules import threading
import traceback
from _pydevd_bundle import pydevd_save_locals
from _pydev_bundle.pydev_imports import Exec, execfile
from _pydevd_bundle.pydevd_utils import VariableWithOffset
SENTINEL_VALUE = []
DEFAULT_DF_FORMAT = "s"
# ------------------------------------------------------------------------------------------------------ class for errors
class VariableError(RuntimeError): pass
class FrameNotFoundError(RuntimeError): pass
def _iter_frames(initialFrame):
'''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''
# cannot use yield
frames = []
while initialFrame is not None:
frames.append(initialFrame)
initialFrame = initialFrame.f_back
return frames
def dump_frames(thread_id):
sys.stdout.write('dumping frames\n')
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("find_frame: must execute on same thread")
curFrame = get_frame()
for frame in _iter_frames(curFrame):
sys.stdout.write('%s\n' % pickle.dumps(frame))
# ===============================================================================
# AdditionalFramesContainer
# ===============================================================================
class AdditionalFramesContainer:
lock = thread.allocate_lock()
additional_frames = {} # dict of dicts
def add_additional_frame_by_id(thread_id, frames_by_id):
AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id
addAdditionalFrameById = add_additional_frame_by_id # Backward compatibility
def remove_additional_frame_by_id(thread_id):
del AdditionalFramesContainer.additional_frames[thread_id]
removeAdditionalFrameById = remove_additional_frame_by_id # Backward compatibility
def has_additional_frames_by_id(thread_id):
return thread_id in AdditionalFramesContainer.additional_frames
def get_additional_frames_by_id(thread_id):
return AdditionalFramesContainer.additional_frames.get(thread_id)
def find_frame(thread_id, frame_id):
""" returns a frame on the thread that has a given frame_id """
try:
curr_thread_id = get_current_thread_id(threading.currentThread())
if thread_id != curr_thread_id:
try:
return get_custom_frame(thread_id, frame_id) # I.e.: thread_id could be a stackless frame id + thread_id.
except:
pass
raise VariableError("find_frame: must execute on same thread (%s != %s)" % (thread_id, curr_thread_id))
lookingFor = int(frame_id)
if AdditionalFramesContainer.additional_frames:
if thread_id in AdditionalFramesContainer.additional_frames:
frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)
if frame is not None:
return frame
curFrame = get_frame()
if frame_id == "*":
return curFrame # any frame is specified with "*"
frameFound = None
for frame in _iter_frames(curFrame):
if lookingFor == id(frame):
frameFound = frame
del frame
break
del frame
# Important: python can hold a reference to the frame from the current context
# if an exception is raised, so, if we don't explicitly add those deletes
# we might have those variables living much more than we'd want to.
# I.e.: sys.exc_info holding reference to frame that raises exception (so, other places
# need to call sys.exc_clear())
del curFrame
if frameFound is None:
msgFrames = ''
i = 0
for frame in _iter_frames(get_frame()):
i += 1
msgFrames += str(id(frame))
if i % 5 == 0:
msgFrames += '\n'
else:
msgFrames += ' - '
# Note: commented this error message out (it may commonly happen
# if a message asking for a frame is issued while a thread is paused
# but the thread starts running before the message is actually
# handled).
# Leaving code to uncomment during tests.
# err_msg = '''find_frame: frame not found.
# Looking for thread_id:%s, frame_id:%s
# Current thread_id:%s, available frames:
# %s\n
# ''' % (thread_id, lookingFor, curr_thread_id, msgFrames)
#
# sys.stderr.write(err_msg)
return None
return frameFound
except:
import traceback
traceback.print_exc()
return None
def getVariable(thread_id, frame_id, scope, attrs):
"""
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2).
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
"""
if scope == 'BY_ID':
if thread_id != get_current_thread_id(threading.currentThread()):
raise VariableError("getVariable: must execute on same thread")
try:
import gc
objects = gc.get_objects()
except:
pass # Not all python variants have it.
else:
frame_id = int(frame_id)
for var in objects:
if id(var) == frame_id:
if attrs is not None:
attrList = attrs.split('\t')
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
# If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
return None
frame = find_frame(thread_id, frame_id)
if frame is None:
return {}
if attrs is not None:
attrList = attrs.split('\t')
else:
attrList = []
for attr in attrList:
attr.replace("@_@TAB_CHAR@_@", '\t')
if scope == 'EXPRESSION':
for count in xrange(len(attrList)):
if count == 0:
# An Expression can be in any scope (globals/locals), therefore it needs to evaluated as an expression
var = evaluate_expression(thread_id, frame_id, attrList[count], False)
else:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, attrList[count])
else:
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
# in a frame access both locals and globals as Python does
var = {}
var.update(frame.f_globals)
var.update(frame.f_locals)
for k in attrList:
_type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def get_offset(attrs):
"""
Extract offset from the given attributes.
:param attrs: The string of a compound variable fields split by tabs.
If an offset is given, it must go the first element.
:return: The value of offset if given or 0.
"""
offset = 0
if attrs is not None:
try:
offset = int(attrs.split('\t')[0])
except ValueError:
pass
return offset
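# Examples (illustrative): get_offset('300\tobj\tattr1') returns 300, while
# get_offset('obj\tattr1') and get_offset(None) both return 0.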
def resolve_compound_variable_fields(thread_id, frame_id, scope, attrs):
"""
Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields
:note: PyCharm supports progressive loading of large collections and uses the `attrs`
parameter to pass the offset, e.g. 300\t\\obj\tattr1\tattr2 should return
the value of attr2 starting from the 300th element. This hack makes it possible
to add the support of progressive loading without extending of the protocol.
"""
offset = get_offset(attrs)
orig_attrs, attrs = attrs, attrs.split('\t', 1)[1] if offset else attrs
var = getVariable(thread_id, frame_id, scope, attrs)
try:
_type, _typeName, resolver = get_type(var)
return _typeName, resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)
except:
sys.stderr.write('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s\n' % (
thread_id, frame_id, scope, orig_attrs,))
traceback.print_exc()
def resolve_var_object(var, attrs):
"""
Resolve variable's attribute
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a value of resolved variable's attribute
"""
if attrs is not None:
attr_list = attrs.split('\t')
else:
attr_list = []
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def resolve_compound_var_object_fields(var, attrs):
"""
Resolve compound variable by its object and attributes
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields
"""
offset = get_offset(attrs)
attrs = attrs.split('\t', 1)[1] if offset else attrs
attr_list = attrs.split('\t')
for k in attr_list:
type, _typeName, resolver = get_type(var)
var = resolver.resolve(var, k)
try:
type, _typeName, resolver = get_type(var)
return resolver.get_dictionary(VariableWithOffset(var, offset) if offset else var)
except:
traceback.print_exc()
def custom_operation(thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
"""
We'll execute the code_or_file and then search in the namespace the operation_fn_name to execute with the given var.
code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)
"""
expressionValue = getVariable(thread_id, frame_id, scope, attrs)
try:
namespace = {'__name__': '<custom_operation>'}
if style == "EXECFILE":
namespace['__file__'] = code_or_file
execfile(code_or_file, namespace, namespace)
else: # style == EXEC
namespace['__file__'] = '<customOperationCode>'
Exec(code_or_file, namespace, namespace)
return str(namespace[operation_fn_name](expressionValue))
except:
traceback.print_exc()
def eval_in_context(expression, globals, locals):
result = None
try:
result = eval(expression, globals, locals)
except Exception:
s = StringIO()
traceback.print_exc(file=s)
result = s.getvalue()
try:
try:
etype, value, tb = sys.exc_info()
result = value
finally:
etype = value = tb = None
except:
pass
result = ExceptionOnEvaluate(result)
# Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...
try:
if '__' in expression:
# Try to handle '__' name mangling...
split = expression.split('.')
curr = locals.get(split[0])
for entry in split[1:]:
if entry.startswith('__') and not hasattr(curr, entry):
entry = '_%s%s' % (curr.__class__.__name__, entry)
curr = getattr(curr, entry)
result = curr
except:
pass
return result
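# Example behaviour (illustrative): eval_in_context('x + 1', {}, {'x': 41})
# returns 42; a failing expression returns an ExceptionOnEvaluate wrapper
# instead of raising, and '__'-prefixed attributes are retried with the
# class-private name-mangled form.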
def evaluate_expression(thread_id, frame_id, expression, doExec):
'''returns the result of the evaluated expression
@param doExec: determines if we should do an exec or an eval
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
# Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
# (Names not resolved in generator expression in method)
# See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals
try:
expression = str(expression.replace('@LINE@', '\n'))
if doExec:
try:
# try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
# it will have whatever the user actually did)
compiled = compile(expression, '<string>', 'eval')
except:
Exec(expression, updated_globals, frame.f_locals)
pydevd_save_locals.save_locals(frame)
else:
result = eval(compiled, updated_globals, frame.f_locals)
if result is not None: # Only print if it's not None (as python does)
sys.stdout.write('%s\n' % (result,))
return
else:
return eval_in_context(expression, updated_globals, frame.f_locals)
finally:
# Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
del frame
def change_attr_expression(thread_id, frame_id, attr, expression, dbg, value=SENTINEL_VALUE):
'''Changes some attribute in a given frame.
'''
frame = find_frame(thread_id, frame_id)
if frame is None:
return
try:
expression = expression.replace('@LINE@', '\n')
if dbg.plugin and value is SENTINEL_VALUE:
result = dbg.plugin.change_variable(frame, attr, expression)
if result:
return result
if value is SENTINEL_VALUE:
# It is possible to have variables with names like '.0', ',,,foo', etc in scope by setting them with
# `sys._getframe().f_locals`. In particular, the '.0' variable name is used to denote the list iterator when we stop in
# list comprehension expressions. This variable evaluates to 0. by `eval`, which is not what we want and this is the main
# reason we have to check if the expression exists in the global and local scopes before trying to evaluate it.
value = frame.f_locals.get(expression) or frame.f_globals.get(expression) or eval(expression, frame.f_globals, frame.f_locals)
if attr[:7] == "Globals":
attr = attr[8:]
if attr in frame.f_globals:
frame.f_globals[attr] = value
return frame.f_globals[attr]
else:
if pydevd_save_locals.is_save_locals_available():
frame.f_locals[attr] = value
pydevd_save_locals.save_locals(frame)
return frame.f_locals[attr]
# default way (only works for changing it in the topmost frame)
result = value
Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
return result
except Exception:
traceback.print_exc()
MAXIMUM_ARRAY_SIZE = float('inf')
def array_to_xml(array, name, roffset, coffset, rows, cols, format):
array, xml, r, c, f = array_to_meta_xml(array, name, format)
format = '%' + f
if rows == -1 and cols == -1:
rows = r
cols = c
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE)
# there is no obvious rule for slicing (at least 5 choices)
if len(array) == 1 and (rows > 1 or cols > 1):
array = array[0]
if array.size > len(array):
array = array[roffset:, coffset:]
rows = min(rows, len(array))
cols = min(cols, len(array[0]))
if len(array) == 1:
array = array[0]
elif array.size == len(array):
if roffset == 0 and rows == 1:
array = array[coffset:]
cols = min(cols, len(array))
elif coffset == 0 and cols == 1:
array = array[roffset:]
rows = min(rows, len(array))
def get_value(row, col):
value = array
if rows == 1 or cols == 1:
if rows == 1 and cols == 1:
value = array[0]
else:
value = array[(col if rows == 1 else row)]
if "ndarray" in str(type(value)):
value = value[0]
else:
value = array[row][col]
return value
xml += array_data_to_xml(rows, cols, lambda r: (get_value(r, c) for c in range(cols)), format)
return xml
class ExceedingArrayDimensionsException(Exception):
pass
def array_to_meta_xml(array, name, format):
type = array.dtype.kind
slice = name
l = len(array.shape)
# initial load, compute slice
if format == '%':
if l > 2:
slice += '[0]' * (l - 2)
for r in range(l - 2):
array = array[0]
if type == 'f':
format = '.5f'
elif type == 'i' or type == 'u':
format = 'd'
else:
format = 's'
else:
format = format.replace('%', '')
l = len(array.shape)
reslice = ""
if l > 2:
raise ExceedingArrayDimensionsException()
elif l == 1:
# special case with 1D arrays arr[i, :] - row, but arr[:, i] - column with equal shape and ndim
# http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
# explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
# we use kind of a hack - get information about memory from C_CONTIGUOUS
is_row = array.flags['C_CONTIGUOUS']
if is_row:
rows = 1
cols = len(array)
if cols < len(array):
reslice = '[0:%s]' % (cols)
array = array[0:cols]
else:
cols = 1
rows = len(array)
if rows < len(array):
reslice = '[0:%s]' % (rows)
array = array[0:rows]
elif l == 2:
rows = array.shape[-2]
cols = array.shape[-1]
if cols < array.shape[-1] or rows < array.shape[-2]:
reslice = '[0:%s, 0:%s]' % (rows, cols)
array = array[0:rows, 0:cols]
# avoid slice duplication
if not slice.endswith(reslice):
slice += reslice
bounds = (0, 0)
if type in NUMPY_NUMERIC_TYPES and array.size != 0:
bounds = (array.min(), array.max())
return array, slice_to_xml(slice, rows, cols, format, type, bounds), rows, cols, format
def get_column_formatter_by_type(initial_format, column_type):
if column_type in NUMPY_NUMERIC_TYPES and initial_format:
if column_type in NUMPY_FLOATING_POINT_TYPES and initial_format.strip() == DEFAULT_DF_FORMAT:
# use custom formatting for floats when default formatting is set
return array_default_format(column_type)
return initial_format
else:
return array_default_format(column_type)
def get_formatted_row_elements(row, iat, dim, cols, format, dtypes):
for c in range(cols):
val = iat[row, c] if dim > 1 else iat[row]
col_formatter = get_column_formatter_by_type(format, dtypes[c])
try:
yield ("%" + col_formatter) % (val,)
except TypeError:
yield ("%" + DEFAULT_DF_FORMAT) % (val,)
def array_default_format(type):
if type == 'f':
return '.5f'
elif type == 'i' or type == 'u':
return 'd'
else:
return 's'
def get_label(label):
return str(label) if not isinstance(label, tuple) else '/'.join(map(str, label))
DATAFRAME_HEADER_LOAD_MAX_SIZE = 100
def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
"""
:type df: pandas.core.frame.DataFrame
:type name: str
:type coffset: int
:type roffset: int
:type rows: int
:type cols: int
:type format: str
"""
original_df = df
dim = len(df.axes)
num_rows = df.shape[0]
num_cols = df.shape[1] if dim > 1 else 1
format = format.replace('%', '')
if not format:
if num_rows > 0 and num_cols == 1: # series or data frame with one column
try:
kind = df.dtype.kind
except AttributeError:
try:
kind = df.dtypes[0].kind
except (IndexError, KeyError):
kind = 'O'
format = array_default_format(kind)
else:
format = array_default_format(DEFAULT_DF_FORMAT)
xml = slice_to_xml(name, num_rows, num_cols, format, "", (0, 0))
if (rows, cols) == (-1, -1):
rows, cols = num_rows, num_cols
elif (rows, cols) == (0, 0):
# return header only
r = min(num_rows, DATAFRAME_HEADER_LOAD_MAX_SIZE)
c = min(num_cols, DATAFRAME_HEADER_LOAD_MAX_SIZE)
xml += header_data_to_xml(r, c, [""] * num_cols, [(0, 0)] * num_cols, lambda x: DEFAULT_DF_FORMAT, original_df, dim)
return xml
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE, num_cols)
# need to precompute column bounds here before slicing!
col_bounds = [None] * cols
dtypes = [None] * cols
if dim > 1:
for col in range(cols):
dtype = df.dtypes.iloc[coffset + col].kind
dtypes[col] = dtype
if dtype in NUMPY_NUMERIC_TYPES and df.size != 0:
cvalues = df.iloc[:, coffset + col]
bounds = (cvalues.min(), cvalues.max())
else:
bounds = (0, 0)
col_bounds[col] = bounds
else:
dtype = df.dtype.kind
dtypes[0] = dtype
col_bounds[0] = (df.min(), df.max()) if dtype in NUMPY_NUMERIC_TYPES and df.size != 0 else (0, 0)
df = df.iloc[roffset: roffset + rows, coffset: coffset + cols] if dim > 1 else df.iloc[roffset: roffset + rows]
rows = df.shape[0]
cols = df.shape[1] if dim > 1 else 1
def col_to_format(column_type):
return get_column_formatter_by_type(format, column_type)
iat = df.iat if dim == 1 or len(df.columns.unique()) == len(df.columns) else df.iloc
def formatted_row_elements(row):
return get_formatted_row_elements(row, iat, dim, cols, format, dtypes)
xml += header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim)
xml += array_data_to_xml(rows, cols, formatted_row_elements, format)
return xml
def array_data_to_xml(rows, cols, get_row, format):
xml = "<arraydata rows=\"%s\" cols=\"%s\"/>\n" % (rows, cols)
for row in range(rows):
xml += "<row index=\"%s\"/>\n" % row
for value in get_row(row):
xml += var_to_xml(value, '', format=format)
return xml
def slice_to_xml(slice, rows, cols, format, type, bounds):
return '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"%s\" type=\"%s\" max=\"%s\" min=\"%s\"/>' % \
(slice, rows, cols, quote(format), type, bounds[1], bounds[0])
def header_data_to_xml(rows, cols, dtypes, col_bounds, col_to_format, df, dim):
xml = "<headerdata rows=\"%s\" cols=\"%s\">\n" % (rows, cols)
for col in range(cols):
col_label = quote(get_label(df.axes[1].values[col]) if dim > 1 else str(col))
bounds = col_bounds[col]
col_format = "%" + col_to_format(dtypes[col])
xml += '<colheader index=\"%s\" label=\"%s\" type=\"%s\" format=\"%s\" max=\"%s\" min=\"%s\" />\n' % \
(str(col), col_label, dtypes[col], col_to_format(dtypes[col]), col_format % bounds[1], col_format % bounds[0])
for row in range(rows):
xml += "<rowheader index=\"%s\" label = \"%s\"/>\n" % (str(row), get_label(df.axes[0].values[row]))
xml += "</headerdata>\n"
return xml
def is_able_to_format_number(format):
try:
format % math.pi
except Exception:
return False
return True
TYPE_TO_XML_CONVERTERS = {
"ndarray": array_to_xml,
"DataFrame": dataframe_to_xml,
"Series": dataframe_to_xml,
"GeoDataFrame": dataframe_to_xml,
"GeoSeries": dataframe_to_xml
}
def table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):
_, type_name, _ = get_type(array)
format = format if is_able_to_format_number(format) else '%'
if type_name in TYPE_TO_XML_CONVERTERS:
return "<xml>%s</xml>" % TYPE_TO_XML_CONVERTERS[type_name](array, name, roffset, coffset, rows, cols, format)
else:
raise VariableError("type %s not supported" % type_name)
| apache-2.0 |
JohanComparat/pyEmerge | bin/plot_XLF.py | 1 | 2079 | import numpy as n
import glob
import h5py
import os
import time
import sys
out_dir = os.path.join(os.path.join("/afs/mpe/www/people/comparat/", "eRoMok", "h5", "LX_function" ))
h5_files = n.array(glob.glob(os.path.join(os.environ['MD10'], "h5", "hlist_?.?????_emerge.hdf5")))
h5_files.sort()
bins = n.arange(38,48,0.25)
xb = (bins[1:] + bins[:-1]) / 2.
hh = 0.6777
volume=1000.**3./hh**3.
zmin, zmax, z_center, Lxmin, Lxmax, Lx_c, Nobj, phi, phierr = n.loadtxt(os.path.join(os.environ['DARKSIM_DIR'], 'observations', 'LXFunction', 'miyaji_2015.ascii'), unpack=True)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
def plot_XLF(h5_file):
f1 = h5py.File(h5_file, "r")
print(h5_file)
redshift = f1.attrs['redshift']
p.figure(1, (6,6))
sel = (redshift>zmin)&(redshift<zmax)
if len(sel.nonzero()[0])>0:
p.errorbar(Lx_c[sel], phi[sel], xerr=[Lx_c[sel]-Lxmin[sel], Lxmax[sel]-Lx_c[sel]], yerr=phierr[sel], label='Mi15 '+str(z_center[sel][0]), ls='dashed')
mass = n.log10(f1['/moster_2013_data/stellar_mass'].value )
lsar = f1['/agn_properties/log_lambda_sar'].value
active = (f1['/agn_properties/agn_activity'].value ==1 )
sel = (mass>0) & (mass!=n.inf) & (n.isnan(mass)==False) & (lsar>0) & (active)
print( h5_file, len(mass), len(mass[sel]), len(mass[sel])>0 )
LX = mass[sel]+lsar[sel]
counts, bb = n.histogram(LX, bins=bins)
dN_dVdlogM = counts/(bins[1:]-bins[:-1])/volume/n.log(10)
ok = (dN_dVdlogM>0)
p.plot(xb[ok], dN_dVdlogM[ok], label='AGN sim Bo16', lw=2)#, ls='dashed')
#g10(dN_dVdlogM_g_AGN[ok]), label='AGN simulated')#, lw=2)
p.xlabel('LX [2-10 keV]')
p.ylabel('Phi ')
p.xlim((40., 46.))
p.yscale('log')
#p.ylim((-9,-2))
p.title('z='+str(n.round(redshift,3)))
p.grid()
p.legend(loc=0, frameon=False)
print(f1.attrs['file_name'])
p.savefig(os.path.join(out_dir, "MD10_"+str(f1.attrs['aexp'])+"_XLF.png"))
p.clf()
f1.close()
#plot_SMF(h5_files[50])
#plot_SMF(h5_files[65])
for h5_file in h5_files:#[::-1]:
try:
plot_XLF(h5_file)
except( ValueError, KeyError ):
pass
| unlicense |
scikit-optimize/scikit-optimize.github.io | 0.7/_downloads/2f6e22007265fe3158cce44853e94a58/strategy-comparison.py | 1 | 4645 | """
==========================
Comparing surrogate models
==========================
Tim Head, July 2016.
Reformatted by Holger Nahrstaedt 2020
.. currentmodule:: skopt
Bayesian optimization or sequential model-based optimization uses a surrogate
model to model the expensive to evaluate function `func`. There are several
choices for what kind of surrogate model to use. This notebook compares the
performance of:
* gaussian processes,
* extra trees, and
* random forests
as surrogate models. A purely random optimization strategy is also used as
a baseline.
"""
print(__doc__)
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
#############################################################################
# Toy model
# =========
#
# We will use the :class:`benchmarks.branin` function as toy model for the expensive function.
# In a real world application this function would be unknown and expensive
# to evaluate.
from skopt.benchmarks import branin as _branin
def branin(x, noise_level=0.):
return _branin(x) + noise_level * np.random.randn()
#############################################################################
from matplotlib.colors import LogNorm
def plot_branin():
fig, ax = plt.subplots()
x1_values = np.linspace(-5, 10, 100)
x2_values = np.linspace(0, 15, 100)
x_ax, y_ax = np.meshgrid(x1_values, x2_values)
vals = np.c_[x_ax.ravel(), y_ax.ravel()]
fx = np.reshape([branin(val) for val in vals], (100, 100))
cm = ax.pcolormesh(x_ax, y_ax, fx,
norm=LogNorm(vmin=fx.min(),
vmax=fx.max()))
minima = np.array([[-np.pi, 12.275], [+np.pi, 2.275], [9.42478, 2.475]])
ax.plot(minima[:, 0], minima[:, 1], "r.", markersize=14,
lw=0, label="Minima")
cb = fig.colorbar(cm)
cb.set_label("f(x)")
ax.legend(loc="best", numpoints=1)
ax.set_xlabel("X1")
ax.set_xlim([-5, 10])
ax.set_ylabel("X2")
ax.set_ylim([0, 15])
plot_branin()
#############################################################################
# This shows the value of the two-dimensional branin function and
# the three minima.
#
#
# Objective
# =========
#
# The objective of this example is to find one of these minima in as
# few iterations as possible. One iteration is defined as one call
# to the :class:`benchmarks.branin` function.
#
# We will evaluate each model several times using a different seed for the
# random number generator. Then compare the average performance of these
# models. This makes the comparison more robust against models that get
# "lucky".
from functools import partial
from skopt import gp_minimize, forest_minimize, dummy_minimize
func = partial(branin, noise_level=2.0)
bounds = [(-5.0, 10.0), (0.0, 15.0)]
n_calls = 60
#############################################################################
def run(minimizer, n_iter=5):
return [minimizer(func, bounds, n_calls=n_calls, random_state=n)
for n in range(n_iter)]
# Random search
dummy_res = run(dummy_minimize)
# Gaussian processes
gp_res = run(gp_minimize)
# Random forest
rf_res = run(partial(forest_minimize, base_estimator="RF"))
# Extra trees
et_res = run(partial(forest_minimize, base_estimator="ET"))
#############################################################################
# Note that this can take a few minutes.
from skopt.plots import plot_convergence
plot = plot_convergence(("dummy_minimize", dummy_res),
("gp_minimize", gp_res),
("forest_minimize('rf')", rf_res),
("forest_minimize('et)", et_res),
true_minimum=0.397887, yscale="log")
plot.legend(loc="best", prop={'size': 6}, numpoints=1)
#############################################################################
# This plot shows the value of the minimum found (y axis) as a function
# of the number of iterations performed so far (x axis). The dashed red line
# indicates the true value of the minimum of the :class:`benchmarks.branin` function.
#
# For the first ten iterations all methods perform equally well as they all
# start by creating ten random samples before fitting their respective model
# for the first time. After iteration ten the next point at which
# to evaluate :class:`benchmarks.branin` is guided by the model, which is where differences
# start to appear.
#
# Each minimizer only has access to noisy observations of the objective
# function, so as time passes (more iterations) it will start observing
# values that are below the true value simply because they are fluctuations.
| bsd-3-clause |
tjduigna/exatomic | exatomic/core/editor.py | 3 | 2918 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Atomic Editor
###################
This module provides a text file editor that can be used to transform commonly
found file formats directly into :class:`~exatomic.container.Universe` objects.
"""
import six
import pandas as pd
from exa import Editor as _Editor
from exa import TypedMeta
from .universe import Universe
from .frame import compute_frame_from_atom
class Editor(six.with_metaclass(TypedMeta, _Editor)):
"""
Base atomic editor class for converting between file formats and to (or
from) :class:`~exatomic.container.Universe` objects.
Note:
Functions defined in the editor that generate typed attributes (see
below) should be names "parse_{data object name}".
See Also:
For a list of typed attributes, see :class:`~exatomic.core.universe.Universe`.
"""
_getter_prefix = "parse"
def parse_frame(self):
"""
Create a minimal :class:`~exatomic.frame.Frame` from the (parsed)
:class:`~exatomic.core.atom.Atom` object.
"""
self.frame = compute_frame_from_atom(self.atom)
def to_universe(self, **kws):
"""
Convert the editor to a :class:`~exatomic.core.universe.Universe` object.
Args:
name (str): Name
description (str): Description of parsed file
meta (dict): Optional dictionary of metadata
verbose (bool): Verbose information on failed parse methods
ignore (bool): Ignore failed parse methods
"""
name = kws.pop("name", None)
description = kws.pop("description", None)
meta = kws.pop("meta", None)
verbose = kws.pop("verbose", True)
ignore = kws.pop("ignore", False)
if hasattr(self, 'meta') and self.meta is not None:
if meta is not None:
meta.update(self.meta)
else:
meta = self.meta
kwargs = {'name': name, 'meta': meta,
'description': description}
attrs = [attr.replace('parse_', '')
for attr in vars(self.__class__).keys()
if attr.startswith('parse_')]
extras = {key: val for key, val in vars(self).items()
if isinstance(val, pd.DataFrame)
and key[1:] not in attrs}
for attr in attrs:
result = None
try:
result = getattr(self, attr)
except Exception as e:
if not ignore:
if not str(e).startswith('Please compute'):
print('parse_{} failed with: {}'.format(attr, e))
if result is not None:
kwargs[attr] = result
kwargs.update(kws)
kwargs.update(extras)
return Universe(**kwargs)
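# Minimal usage sketch (illustrative; the concrete parser body and file name are
# assumptions, not part of this module): subclasses provide ``parse_*`` methods
# that set the typed attributes, then call ``to_universe``.
#
# class MyEditor(Editor):
#     def parse_atom(self):
#         self.atom = ...  # build an Atom table from self._lines
#
# uni = MyEditor('some_output_file').to_universe(name='example')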
| apache-2.0 |
rajat1994/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 142 | 5990 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
sfepy/sfepy | sfepy/terms/terms_dg.py | 3 | 26757 | r"""
Discontinuous Galerkin method specific terms
Note
----
In einsum calls the following convention is used:
`i` represents iterating over all cells of a region;
`n` represents iterating over selected cells of a region, for example
over cells on boundary;
`b` represents iterating over basis functions of state variable;
`d` represents iterating over basis functions of test variable;
`k`, `l` , `m` represent iterating over geometric dimensions, for example
coordinates of velocity or facet normal vector or rows and columns of diffusion
tensor;
`q` represents iterating over quadrature points;
`f` represents iterating over facets of cell;
"""
import numpy as nm
# sfepy imports
from sfepy.terms.terms import Term, terms
from sfepy.base.base import output
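# A minimal, hypothetical sketch (not part of the original module) of the
# einsum index convention described in the module docstring: the geometric
# dimension `k` is contracted between facet normals and a per-cell velocity,
# while cells `i`, facets `f` and quadrature points `q` are kept or summed as
# needed, exactly as in the terms below.
def _einsum_convention_sketch():
    n_cell, n_facet, n_qp, dim = 4, 3, 2, 2
    fc_n = nm.ones((n_cell, n_facet, dim))   # facet normals: indices i, f, k
    advelo = nm.ones((n_cell, dim))          # advection velocity: indices i, k
    whs = nm.ones((n_cell, n_facet, n_qp))   # quadrature weights: indices i, f, q
    # maximal wave speed per cell and facet: contract k
    C = nm.abs(nm.einsum("ifk,ik->if", fc_n, advelo))
    # integrate over facets and quadrature points: one value per cell
    return nm.einsum("if,ifq->i", C, whs)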
class DGTerm(Term):
r"""
Abstract base class for DG terms, provides alternative call_function and
eval_real methods to accommodate returning iels and vals.
"""
poly_space_base = "legendre"
def call_function(self, out, fargs):
try:
out, status = self.function(out, *fargs)
except (RuntimeError, ValueError):
terms.errclear()
raise
if status:
terms.errclear()
raise ValueError('term evaluation failed! (%s)' % self.name)
return out, status
def eval_real(self, shape, fargs, mode='eval', term_mode=None,
diff_var=None, **kwargs):
out = nm.empty(shape, dtype=nm.float64)
if mode == 'weak':
out, status = self.call_function(out, fargs)
else:
status = self.call_function(out, fargs)
return out, status
@staticmethod
def _get_nbrhd_dof_indexes(cells, nrbhs, field):
"""Get indexes of DOFs for active and of active neighbouring cells
Parameters
----------
cells : array_like
            cell indices
        nrbhs : array_like
            cell - neighbourhood indices
field : DGField
Returns
-------
iels : ndarray
            inner and outer DOF indices, i.e. diagonal indices and then their
            corresponding neighbour indices
"""
inner_iels = field.bubble_dofs
inner_iels = nm.stack((nm.repeat(inner_iels, field.n_el_nod),
nm.tile(inner_iels, field.n_el_nod).flatten()),
axis=-1)
outer_iels = nm.stack(
(nm.repeat(field.bubble_dofs[cells], field.n_el_nod),
nm.tile(field.bubble_dofs[nrbhs], field.n_el_nod).flatten()),
axis=-1)
iels = nm.vstack((inner_iels, outer_iels))
return iels
class AdvectionDGFluxTerm(DGTerm):
r"""
Lax-Friedrichs flux term for advection of scalar quantity :math:`p` with the
advection velocity :math:`\ul{a}` given as a material parameter (a known
function of space and time).
:Definition:
.. math::
\int_{\partial{T_K}} \ul{n} \cdot \ul{f}^{*} (p_{in}, p_{out})q
where
.. math::
\ul{f}^{*}(p_{in}, p_{out}) = \ul{a} \frac{p_{in} + p_{out}}{2} +
(1 - \alpha) \ul{n} C \frac{ p_{in} - p_{out}}{2},
:math:`\alpha \in [0, 1]`; :math:`\alpha = 0` for upwind scheme,
:math:`\alpha = 1` for central scheme, and
.. math::
C = \max_{p \in [?, ?]}\left\lvert n_x a_1 +
n_y a_2 \right\rvert =
\max_{p \in [?, ?]} \left\lvert \ul{n} \cdot \ul{a} \right\rvert
the :math:`p_{in}` resp. :math:`p_{out}`
is solution on the boundary of the element
provided by element itself resp. its
neighbor and :math:`\ul{a}` is advection velocity.
:Arguments 1:
- material : :math:`\ul{a}`
- virtual : :math:`q`
- state : :math:`p`
:Arguments 3:
- material : :math:`\ul{a}`
- virtual : :math:`q`
- state : :math:`p`
- opt_material : :math:`\alpha`
"""
alpha = 0
name = "dw_dg_advect_laxfrie_flux"
modes = ("weak",)
arg_types = ('opt_material', 'material_advelo', 'virtual', 'state')
arg_shapes = [{'opt_material' : '.: 1',
'material_advelo': 'D, 1',
'virtual' : (1, 'state'),
'state' : 1
},
{'opt_material': None}]
integration = 'volume'
symbolic = {'expression': 'div(a*p)*w',
'map' : {'p': 'state', 'a': 'material', 'v': 'virtual'}
}
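    # A hypothetical helper (an illustration only, not used by the term) that
    # sketches the normal component of the Lax-Friedrichs flux defined in the
    # docstring, n . f*(p_in, p_out), for a single facet with unit normal `n`.
    @staticmethod
    def _lax_friedrichs_flux_sketch(p_in, p_out, a, n, alpha=0.0):
        C = nm.abs(nm.dot(n, a))                      # max wave speed |n . a|
        central = nm.dot(n, a) * (p_in + p_out) / 2.
        upwind = (1. - alpha) * C * (p_in - p_out) / 2.
        return central + upwind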
def get_fargs(self, alpha, advelo, test, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
if alpha is not None:
self.alpha = alpha
field = state.field
region = field.region
if "DG" not in field.family_name:
raise ValueError("Used DG term with non DG field {} of family {}"
.format(field.name, field.family_name))
fargs = (state, diff_var, field, region, advelo[:, 0, :, 0])
return fargs
def function(self, out, state, diff_var, field, region, advelo):
if diff_var is not None:
fc_n = field.get_cell_normals_per_facet(region)
C = nm.abs(nm.einsum("ifk,ik->if", fc_n, advelo))
nbrhd_idx = field.get_facet_neighbor_idx(region, state.eq_map)
active_cells, active_facets = nm.where(nbrhd_idx[:, :, 0] >= 0)
active_nrbhs = nbrhd_idx[active_cells, active_facets, 0]
in_fc_b, out_fc_b, whs = field.get_both_facet_base_vals(state, region)
# TODO broadcast advelo to facets
# - maybe somehow get values of advelo at them?
# compute values
inner_diff = nm.einsum("nfk, nfk->nf",
fc_n,
advelo[:, None, :]
+ nm.einsum("nfk, nf->nfk",
(1 - self.alpha) * fc_n, C)) / 2.
outer_diff = nm.einsum("nfk, nfk->nf",
fc_n,
advelo[:, None, :]
- nm.einsum("nfk, nf->nfk",
(1 - self.alpha) * fc_n, C)) / 2.
inner_vals = nm.einsum("nf, ndfq, nbfq, nfq -> ndb",
inner_diff,
in_fc_b,
in_fc_b,
whs)
outer_vals = nm.einsum("i, idq, ibq, iq -> idb",
outer_diff[active_cells, active_facets],
in_fc_b[active_cells, :, active_facets],
out_fc_b[active_cells, :, active_facets],
whs[active_cells, active_facets])
vals = nm.vstack((inner_vals, outer_vals))
vals = vals.flatten()
# compute positions within matrix
iels = self._get_nbrhd_dof_indexes(active_cells, active_nrbhs, field)
out = (vals, iels[:, 0], iels[:, 1], state, state)
else:
fc_n = field.get_cell_normals_per_facet(region)
# get maximal wave speeds at facets
C = nm.abs(nm.einsum("ifk,ik->if", fc_n, advelo))
facet_base_vals = field.get_facet_base(base_only=True)
in_fc_v, out_fc_v, weights = field.get_both_facet_state_vals(state,
region)
# get sane facet base shape
fc_b = facet_base_vals[:, 0, :, 0, :].T
# (n_el_nod, n_el_facet, n_qp)
fc_v_avg = (in_fc_v + out_fc_v)/2.
fc_v_jmp = in_fc_v - out_fc_v
central = nm.einsum("ik,ifq->ifkq", advelo, fc_v_avg)
upwind = (1 - self.alpha) / 2. * nm.einsum("if,ifk,ifq->ifkq",
C, fc_n, fc_v_jmp)
cell_fluxes = nm.einsum("ifk,ifkq,dfq,ifq->id",
fc_n, central + upwind, fc_b, weights)
out[:] = 0.0
n_el_nod = field.n_el_nod
for i in range(n_el_nod):
out[:, :, i, 0] = cell_fluxes[:, i, None]
status = None
return out, status
class DiffusionDGFluxTerm(DGTerm):
r"""
Basic DG diffusion flux term for scalar quantity.
:Definition:
.. math::
\int_{\partial{T_K}} D \langle \nabla p \rangle [q] \mbox{ , }
\int_{\partial{T_K}} D \langle \nabla q \rangle [p]
where
.. math::
\langle \nabla \phi \rangle = \frac{\nabla\phi_{in} + \nabla\phi_{out}}{2}
.. math::
[\phi] = \phi_{in} - \phi_{out}
:math:
The :math:`p_{in}` resp. :math:`p_{out}`
is solution on the boundary of the element
provided by element itself resp. its neighbour.
:Arguments 1:
- material : :math:`D`
- state : :math:`p`
- virtual : :math:`q`
:Arguments 2:
- material : :math:`D`
- virtual : :math:`q`
- state : :math:`p`
"""
name = "dw_dg_diffusion_flux"
arg_types = (('material', 'state', 'virtual'), # left
('material', 'virtual', 'state') # right
)
arg_shapes = [{'material': '1, 1',
'virtual/avg_state': (1, None),
'state/avg_state' : 1,
'virtual/avg_virtual': (1, None),
'state/avg_virtual' : 1,
}]
integration = 'volume'
modes = ('avg_state', 'avg_virtual')
def get_fargs(self, diff_tensor, test, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
field = state.field
region = field.region
if "DG" not in field.family_name:
raise ValueError("Used DG term with non DG field {} of family {}"
.format(field.name, field.family_name))
if self.mode == "avg_state":
# put state where it is expected by the function
state = test
fargs = (state, diff_var, field, region, diff_tensor[:, 0, :, :])
return fargs
def function(self, out, state, diff_var, field, region, D):
D = Term.tile_mat(D, out.shape[0])
if diff_var is not None: # matrix mode
# outR = out.copy()[..., 0:1]
out = self._function_matrix(out, state, diff_var, field, region, D)
# vals, ielsi, ielsj = out[:3]
# from scipy.sparse import coo_matrix
# extra = coo_matrix((vals, (ielsi, ielsj)),
# shape=2*(field.n_el_nod * field.n_cell,))
# M = extra.toarray()
# u = state.data[0]
# Mu = nm.dot(M, u).reshape((field.n_cell, field.n_el_nod))
#
# outR = self._function_residual(outR, state, diff_var, field,
# region, D).squeeze()
# from matplotlib import pyplot as plt
# plt.imshow((Mu - outR).T, aspect="auto")
# plt.colorbar()
#
# nbrhd_idx = field.get_facet_neighbor_idx(region, state.eq_map)
# bcells = nm.where(nbrhd_idx[:, :, 0] < 0)[0],
# plt.vlines(bcells, -.5, 15, alpha=.3)
# plt.show()
else: # residual mode
out = self._function_residual(out, state, diff_var, field, region, D)
status = None
return out, status
def _function_matrix(self, out, state, diff_var, field, region, D):
fc_n = field.get_cell_normals_per_facet(region)
nbrhd_idx = field.get_facet_neighbor_idx(region, state.eq_map)
active_cells, active_facets = nm.where(nbrhd_idx[:, :, 0] >= 0)
active_nrbhs = nbrhd_idx[active_cells, active_facets, 0]
inner_facet_base, outer_facet_base, whs = \
field.get_both_facet_base_vals(state, region, derivative=False)
inner_facet_base_d, outer_facet_base_d, _ = \
field.get_both_facet_base_vals(state, region, derivative=True)
if self.mode == 'avg_state':
# content of diagonal
inner_vals = nm.einsum("nkl, nbfkq, nfk, ndfq, nfq->ndb",
D,
inner_facet_base_d / 2, # state
fc_n,
inner_facet_base, # test
whs)
outer_vals = nm.einsum(
"ikl, ibkq, ik, idq, iq->idb",
D[active_cells],
outer_facet_base_d[active_cells, :, active_facets] / 2, # state
fc_n[active_cells, active_facets],
inner_facet_base[active_cells, :, active_facets], # test
whs[active_cells, active_facets])
elif self.mode == 'avg_virtual':
# content of diagonal
inner_vals = nm.einsum("nkl, ndfkq, nfk, nbfq, nfq->ndb",
D,
inner_facet_base_d / 2, # test
fc_n,
inner_facet_base, # state
whs)
outer_vals = nm.einsum("ikl, idkq, ik, ibq, iq->idb",
D[active_cells],
inner_facet_base_d[active_cells, :, active_facets] / 2, # test
fc_n[active_cells, active_facets],
- outer_facet_base[active_cells, :, active_facets], # state
whs[active_cells, active_facets])
iels = self._get_nbrhd_dof_indexes(active_cells, active_nrbhs, field)
vals = nm.vstack((inner_vals, outer_vals))
vals = vals.flatten()
# i j
out = (vals, iels[:, 0], iels[:, 1], state, state)
return out
def _function_residual(self, out, state, diff_var, field, region, D):
fc_n = field.get_cell_normals_per_facet(region)
# get base values
inner_facet_base, outer_facet_base, _ = \
field.get_both_facet_base_vals(state, region, derivative=False)
inner_facet_base_d, outer_facet_base_d, _ = \
field.get_both_facet_base_vals(state, region, derivative=True)
# get state values
inner_facet_state_d, outer_facet_state_d, _ = \
field.get_both_facet_state_vals(state, region, derivative=True)
inner_facet_state, outer_facet_state, weights = \
field.get_both_facet_state_vals(state, region, derivative=False)
if self.mode == 'avg_state':
avgDdState = (nm.einsum("ikl,ifkq->ifkq",
D, inner_facet_state_d) +
nm.einsum("ikl,ifkq ->ifkq",
D, outer_facet_state_d)) / 2.
# outer_facet_base is in DG zero - hence the jump is inner value
jmpBase = inner_facet_base
cell_fluxes = nm.einsum("ifkq , ifk, idfq, ifq -> id",
avgDdState, fc_n, jmpBase, weights)
elif self.mode == 'avg_virtual':
avgDdbase = (nm.einsum("ikl,idfkq->idfkq",
D, inner_facet_base_d)) / 2.
jmpState = inner_facet_state - outer_facet_state
cell_fluxes = nm.einsum("idfkq, ifk, ifq , ifq -> id",
avgDdbase, fc_n, jmpState, weights)
out[:] = 0.0
n_el_nod = field.n_el_nod
for i in range(n_el_nod):
out[:, :, i, 0] = cell_fluxes[:, i, None]
return out
class DiffusionInteriorPenaltyTerm(DGTerm):
r"""
Penalty term used to counteract discontinuity arising when
modeling diffusion using Discontinuous Galerkin schemes.
:Definition:
.. math::
\int_{\partial{T_K}} \bar{D} C_w \frac{Ord^2}{d(\partial{T_K})}[p][q]
where
.. math::
[\phi] = \phi_{in} - \phi_{out}
:math:
the :math:`p_{in}` resp. :math:`p_{out}`
is solution on the boundary of the element
provided by element itself resp. its neighbour.
:Arguments:
- material : :math:`D`
- material : :math:`C_w`
- state : :math:`p`
- virtual : :math:`q`
"""
name = "dw_dg_interior_penalty"
modes = ("weak",)
arg_types = ('material', 'material_Cw',
'virtual', 'state')
arg_shapes = [{'material': '1, 1',
'material_Cw': '.: 1',
'virtual' : (1, 'state'),
'state' : 1
}]
def get_fargs(self, diff_tensor, Cw, test, state, mode=None,
term_mode=None, diff_var=None, **kwargs):
field = state.field
region = field.region
if "DG" not in field.family_name:
raise ValueError("Used DG term with non DG field {} of family {}"
.format(field.name, field.family_name))
fargs = (state, diff_var, field, region, Cw, diff_tensor[:, 0, :, :])
return fargs
def function(self, out, state, diff_var, field, region, Cw, diff_tensor):
approx_order = field.approx_order
inner_facet_base, outer_facet_base, whs = \
field.get_both_facet_base_vals(state, region, derivative=False)
facet_vols = nm.sum(whs, axis=-1)
# nu characterizes diffusion tensor, so far we use diagonal average
nu = nm.trace(diff_tensor, axis1=-2, axis2=-1)[..., None] / \
diff_tensor.shape[1]
sigma = nu * Cw * approx_order ** 2 / facet_vols
if diff_var is not None:
nbrhd_idx = field.get_facet_neighbor_idx(region, state.eq_map)
active_cells, active_facets = nm.where(nbrhd_idx[:, :, 0] >= 0)
active_nrbhs = nbrhd_idx[active_cells, active_facets, 0]
inner = nm.einsum("nf, ndfq, nbfq, nfq -> ndb",
sigma,
inner_facet_base, # test
inner_facet_base, # state
whs)
outer = nm.einsum("i, idq, ibq, iq -> idb",
sigma[active_cells, active_facets],
inner_facet_base[active_cells, :, active_facets], # test
- outer_facet_base[active_cells, :, active_facets], # state
whs[active_cells, active_facets])
vals = nm.vstack((inner, outer))
vals = vals.flatten()
iels = self._get_nbrhd_dof_indexes(active_cells, active_nrbhs, field)
out = (vals, iels[:, 0], iels[:, 1], state, state)
else:
inner_facet_state, outer_facet_state, whs = \
field.get_both_facet_state_vals(state, region,
derivative=False
)
inner_facet_base, outer_facet_base, _ = \
field.get_both_facet_base_vals(state, region,
derivative=False
)
jmp_state = inner_facet_state - outer_facet_state
jmp_base = inner_facet_base # - outer_facet_base
n_el_nod = nm.shape(inner_facet_base)[1]
cell_penalty = nm.einsum("nf,nfq,ndfq,nfq->nd",
sigma, jmp_state, jmp_base, whs)
out[:] = 0.0
for i in range(n_el_nod):
out[:, :, i, 0] = cell_penalty[:, i, None]
status = None
return out, status
class NonlinearHyperbolicDGFluxTerm(DGTerm):
r"""
    Lax-Friedrichs flux term for nonlinear hyperbolic term of scalar quantity
:math:`p` with the vector function :math:`\ul{f}` given as a material
parameter.
:Definition:
.. math::
\int_{\partial{T_K}} \ul{n} \cdot f^{*} (p_{in}, p_{out})q
where
.. math::
\ul{f}^{*}(p_{in}, p_{out}) = \frac{\ul{f}(p_{in})
+ \ul{f}(p_{out})}{2} +
(1 - \alpha) \ul{n} C \frac{ p_{in} - p_{out}}{2},
:math:`\alpha \in [0, 1]`; :math:`\alpha = 0` for upwind scheme,
:math:`\alpha = 1` for central scheme, and
.. math::
C =
\max_{p \in [?, ?]}\left\lvert
n_x \frac{d f_1}{d p} + n_y \frac{d f_2}{d p}
+ \cdots
\right\rvert =
\max_{p \in [?, ?]} \left\lvert
\vec{n}\cdot\frac{d\ul{f}}{dp}(p)
\right\rvert
the :math:`p_{in}` resp. :math:`p_{out}`
is solution on the boundary of the element
provided by element itself resp. its
neighbor.
:Arguments 1:
- material : :math:`\ul{f}`
- material : :math:`\frac{d\ul{f}}{d p}`
- virtual : :math:`q`
- state : :math:`p`
:Arguments 3:
- material : :math:`\ul{f}`
- material : :math:`\frac{d\ul{f}}{d p}`
- virtual : :math:`q`
- state : :math:`p`
- opt_material : :math:`\alpha`
"""
alf = 0
name = "dw_dg_nonlinear_laxfrie_flux"
modes = ("weak",)
arg_types = ('opt_material', 'fun', 'fun_d', 'virtual', 'state')
arg_shapes = [{'opt_material' : '.: 1',
'material_fun' : '.: 1',
'material_fun_d': '.: 1',
'virtual' : (1, 'state'),
'state' : 1
},
{'opt_material': None}]
integration = 'volume'
symbolic = {'expression': 'div(f(p))*w',
'map' : {'p': 'state', 'v': 'virtual', 'f': 'function'}
}
def get_fargs(self, alpha, fun, dfun, test, state,
mode=None, term_mode=None, diff_var=None, **kwargs):
if alpha is not None:
self.alf = nm.max(alpha) # extract alpha value regardless of shape
self.fun = fun
self.dfun = dfun
if diff_var is not None:
output("Diff var is not None in nonlinear, residual only " +
"term"" {} ! Skipping.".format(self.name))
return None, None, None, 0, 0
else:
field = state.field
region = field.region
if "DG" not in field.family_name:
raise ValueError(
"Used DG term with non DG field {} of family {}!"
.format(field.name, field.family_name))
fargs = (state, field, region, fun, dfun)
return fargs
def function(self, out, state, field, region, f, df):
if state is None:
out[:] = 0.0
return None
fc_n = field.get_cell_normals_per_facet(region)
facet_base_vals = field.get_facet_base(base_only=True)
in_fc_v, out_fc_v, weights = field.get_both_facet_state_vals(state, region)
fc_b = facet_base_vals[:, 0, :, 0, :].T # (n_el_nod, n_el_facet, n_qp)
n_el_nod = field.n_el_nod
# get maximal wave speeds at facets
df_in = df(in_fc_v)
df_out = df(out_fc_v)
fc_n__dot__df_in = nm.einsum("ifk,ifqk->ifq", fc_n, df_in)
fc_n__dot__df_out = nm.einsum("ifk,ifqk->ifq", fc_n, df_out)
dfdn = nm.stack((fc_n__dot__df_in, fc_n__dot__df_out), axis=-1)
C = nm.amax(nm.abs(dfdn), axis=(-2, -1))
fc_f_avg = (f(in_fc_v) + f(out_fc_v)) / 2.
fc_v_jmp = in_fc_v - out_fc_v
central = fc_f_avg
upwind = (1 - self.alf) / 2. * nm.einsum("if,ifk,ifq->ifqk",
C, fc_n, fc_v_jmp)
cell_fluxes = nm.einsum("ifk,ifqk,dfq,ifq->id",
fc_n, central + upwind, fc_b, weights)
out[:] = 0.0
for i in range(n_el_nod):
out[:, :, i, 0] = cell_fluxes[:, i, None]
status = None
return out, status
from sfepy.linalg import dot_sequences
class NonlinearScalarDotGradTerm(Term):
r"""
Product of virtual and divergence of vector function of state or volume dot
product of vector function of state and gradient of scalar virtual.
:Definition:
.. math::
\int_{\Omega} q \cdot \nabla \cdot \ul{f}(p) = \int_{\Omega} q \cdot
\text{div} \ul{f}(p) \mbox{ , }
\int_{\Omega} \ul{f}(p) \cdot \nabla q
:Arguments 1:
- function : :math:`\ul{f}`
- virtual : :math:`q`
- state : :math:`p`
:Arguments 2:
- function : :math:`\ul{f}`
- state : :math:`p`
- virtual : :math:`q`
TODO maybe this term would fit better to terms_dot?
"""
name = 'dw_ns_dot_grad_s'
arg_types = (('fun', 'fun_d', 'virtual', 'state'),
('fun', 'fun_d', 'state', 'virtual'))
arg_shapes = [{'material_fun' : '.: 1',
'material_fun_d' : '.: 1',
'virtual/grad_state' : (1, None),
'state/grad_state' : 1,
'virtual/grad_virtual': (1, None),
'state/grad_virtual' : 1}]
modes = ('grad_state', 'grad_virtual')
@staticmethod
def function(out, out_qp, geo, fmode):
status = geo.integrate(out, out_qp)
return status
def get_fargs(self, fun, dfun, var1, var2,
mode=None, term_mode=None, diff_var=None, **kwargs):
vg1, _ = self.get_mapping(var1)
vg2, _ = self.get_mapping(var2)
if diff_var is None:
if self.mode == 'grad_state':
# TODO rewrite using einsum?
geo = vg1
bf_t = vg1.bf.transpose((0, 1, 3, 2))
val_grad_qp = self.get(var2, 'grad')
out_qp = dot_sequences(bf_t, val_grad_qp, 'ATB')
else:
geo = vg2
val_qp = fun(self.get(var1, 'val'))[..., 0, :].swapaxes(-2, -1)
out_qp = dot_sequences(vg2.bfg, val_qp, 'ATB')
fmode = 0
else:
raise ValueError("Matrix mode not supported for {}"
.format(self.name))
            # however, it could be implemented with the use of dfun
if self.mode == 'grad_state':
geo = vg1
bf_t = vg1.bf.transpose((0, 1, 3, 2))
out_qp = dot_sequences(bf_t, vg2.bfg, 'ATB')
else:
geo = vg2
out_qp = dot_sequences(vg2.bfg, vg1.bf, 'ATB')
fmode = 1
return out_qp, geo, fmode
| bsd-3-clause |
weissj3/MWTools | Plotting/PlotDiskFraction.py | 1 | 2442 | #!/usr/bin/python
import sys
sys.path.insert(0, '../Newby-tools/utilities')
sys.path.insert(0, '../Newby-tools/ThreadedTWGWithDisk')
import numpy as np
import astro_coordinates as ac
import matplotlib.pyplot as plt
import math as ma
import pylab as lab
import os
from os.path import isfile, join
def thinDisk(z, r):
return ma.exp(-r/2.250 - abs(z)/.250)
def thickDisk(z, r):
return ma.exp(-r/3.50 - abs(z)/.700)
def spheroid(z, q, r):
return 1.0 / (1.0 + ((r * r) + (z/q) * (z/q)) ** (1.75))
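# The weights below appear to tie the three components together at the solar
# position (R = 8.5 kpc, z = 0): with these factors the thin:thick disk ratio
# there is 0.91875:0.08 and the spheroid contributes about 0.125% of the total
# density (this reading of the hard-coded fractions is an assumption, it is
# not documented in the original script).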
ThinDiskWeight = .91875 / .08 * thickDisk(0.0, 8.5) / thinDisk(0.0,8.5)
BackgroundWeight = .00125 / .99875 * (thickDisk(0.0,8.5) + ThinDiskWeight * thinDisk(0.0, 8.5))/spheroid(0.0, 0.8, 8.5)
R = []
ThinDisk = []
ThickDisk = []
Spheroid = []
for i in range(100):
R.append(float(i) / 10.0);
x,y,z = ac.GC2xyz(135., 0.0, float(i)/10.0, 19)
r = ma.sqrt(x * x + y * y)
tmpThin = ThinDiskWeight * thinDisk(z, r)
tmpThick = thickDisk(z, r)
tmpSpher = BackgroundWeight * spheroid(z, 0.8, r)
total = tmpThin + tmpThick + tmpSpher
ThinDisk.append(tmpThin / total)
ThickDisk.append(tmpThick / total)
Spheroid.append(tmpSpher / total)
plt.plot(R, ThinDisk, "-g", label="Thin Disk ($\mu=135$)", lw=1.5)
plt.plot(R, ThickDisk, "-r", label="Thick Disk ($\mu=135$)", lw=1.5)
plt.plot(R, Spheroid, "-b", label="Hernquist ($\mu=135$)", lw=1.5)
R = []
ThinDisk = []
ThickDisk = []
Spheroid = []
for i in range(100):
R.append(float(i) / 10.0);
x,y,z = ac.GC2xyz(230., 0.0, float(i)/10.0, 19)
r = ma.sqrt(x * x + y * y)
tmpThin = ThinDiskWeight * thinDisk(z, r)
tmpThick = thickDisk(z, r)
tmpSpher = BackgroundWeight * spheroid(z, 0.8, r)
total = tmpThin + tmpThick + tmpSpher
ThinDisk.append(tmpThin / total)
ThickDisk.append(tmpThick / total)
Spheroid.append(tmpSpher / total)
plt.plot(R, ThinDisk, "--g", label="Thin Disk ($\mu=230$)", lw=1.5)
plt.plot(R, ThickDisk, "--r", label="Thick Disk ($\mu=230$)", lw=1.5)
plt.plot(R, Spheroid, "--b", label="Hernquist ($\mu=230$)", lw=1.5)
plt.axvspan(0., 2.3, alpha=0.3, color='grey')
plt.xlabel("Heliocentric R (kpc)", fontsize=14)
plt.ylabel("Stellar Fraction", fontsize=14)
plt.xticks(fontsize=14) # fontsize of the tick labels
plt.yticks(fontsize=14) # fontsize of the tick labels
#plt.title("Star Fraction vs Height Above The Disk", fontsize=18)
plt.legend(bbox_to_anchor=(1.0, 0.66), fontsize=10)
plt.show()
| mit |
khkaminska/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
    # Evaluate the models using cross-validation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
Eric89GXL/mne-python | tutorials/source-modeling/plot_beamformer_lcmv.py | 3 | 12824 | """
Source reconstruction using an LCMV beamformer
==============================================
This tutorial gives an overview of the beamformer method
and shows how to use an LCMV beamformer to reconstruct source activity.
.. contents:: Page contents
:local:
:depth: 2
"""
# Authors: Britta Westner <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample, fetch_fsaverage
from mne.beamformer import make_lcmv, apply_lcmv
###############################################################################
# Introduction to beamformers
# ---------------------------
# A beamformer is a spatial filter that reconstructs source activity by
# scanning through a grid of pre-defined source points and estimating activity
# at each of those source points independently. A set of weights is
# constructed for each defined source location, defining the contribution
# of each sensor to this source.
# Beamformers are often used for their focal reconstructions and their ability
# to reconstruct deeper sources. They can also suppress external noise sources.
# The beamforming method applied in this tutorial is the linearly constrained
# minimum variance (LCMV) beamformer :footcite:`VanVeenEtAl1997`, which
# operates on time series.
# Frequency-resolved data can be reconstructed with the dynamic imaging of
# coherent sources (DICS) beamforming method :footcite:`GrossEtAl2001`.
# As we will see in the following, the spatial filter is computed from two
# ingredients: the forward model solution and the covariance matrix of the
# data.
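# As a rough, stand-alone illustration (not MNE-Python code), the LCMV weights
# for a single source with lead field ``L`` (n_channels x n_orientations) and
# data covariance ``C`` follow the textbook formula
# ``W = (L^T C^-1 L)^-1 L^T C^-1``; the hypothetical helper below shows only
# this formula and ignores regularization, whitening and rank handling.
import numpy as np


def _lcmv_weights_sketch(L, C):
    """Return unregularized LCMV weights for one source (illustration only)."""
    C_inv = np.linalg.pinv(C)
    gain = L.T.dot(C_inv).dot(L)
    return np.linalg.pinv(gain).dot(L.T).dot(C_inv)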
###############################################################################
# Data processing
# ---------------
# We will use the sample data set for this tutorial and reconstruct source
# activity on the trials with left auditory stimulation.
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Read the raw data
raw = mne.io.read_raw_fif(raw_fname)
raw.info['bads'] = ['MEG 2443'] # bad MEG channel
# Set up the epoching
event_id = 1 # those are the trials with left-ear auditory stimuli
tmin, tmax = -0.2, 0.5
events = mne.find_events(raw)
# pick relevant channels
raw.pick(['meg', 'eog']) # pick channels of interest
# Create epochs
proj = False # already applied
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=proj,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
# for speed purposes, cut to a window of interest
evoked = epochs.average().crop(0.05, 0.15)
# Visualize averaged sensor space data
evoked.plot_joint()
del raw # save memory
###############################################################################
# Computing the covariance matrices
# ---------------------------------
# Spatial filters use the data covariance to estimate the filter
# weights. The data covariance matrix will be `inverted`_ during the spatial
# filter computation, so it is valuable to plot the covariance matrix and its
# eigenvalues to gauge whether matrix inversion will be possible.
# Also, because we want to combine different channel types (magnetometers and
# gradiometers), we need to account for the different amplitude scales of these
# channel types. To do this we will supply a noise covariance matrix to the
# beamformer, which will be used for whitening.
# The data covariance matrix should be estimated from a time window that
# includes the brain signal of interest,
# and incorporate enough samples for a stable estimate. A rule of thumb is to
# use more samples than there are channels in the data set; see
# :footcite:`BrookesEtAl2008` for more detailed advice on covariance estimation
# for beamformers. Here, we use a time
# window incorporating the expected auditory response at around 100 ms post
# stimulus and extend the period to account for a low number of trials (72) and
# low sampling rate of 150 Hz.
data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.25,
method='empirical')
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0,
method='empirical')
data_cov.plot(epochs.info)
del epochs
###############################################################################
# When looking at the covariance matrix plots, we can see that our data is
# slightly rank-deficient as the rank is not equal to the number of channels.
# Thus, we will have to regularize the covariance matrix before inverting it
# in the beamformer calculation. This can be achieved by setting the parameter
# ``reg=0.05`` when calculating the spatial filter with
# :func:`~mne.beamformer.make_lcmv`. This corresponds to loading the diagonal
# of the covariance matrix with 5% of the sensor power.
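# To build some intuition for that step, here is a hypothetical NumPy sketch
# of diagonal loading (a generic illustration of the technique, not the exact
# internal MNE-Python implementation):
import numpy as np

_rng = np.random.RandomState(0)
_cov = np.cov(_rng.randn(10, 200))         # toy 10-channel covariance matrix
_reg = 0.05
_cov_loaded = _cov + _reg * np.mean(np.diag(_cov)) * np.eye(_cov.shape[0])
print(np.linalg.cond(_cov), np.linalg.cond(_cov_loaded))  # conditioning improves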
###############################################################################
# The forward model
# -----------------
# The forward model is the other important ingredient for the computation of a
# spatial filter. Here, we will load the forward model from disk; more
# information on how to create a forward model can be found in this tutorial:
# :ref:`tut-forward`.
# Note that beamformers are usually computed in a :class:`volume source space
# <mne.VolSourceEstimate>`, because estimating only cortical surface
# activation can misrepresent the data.
# Read forward model
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
forward = mne.read_forward_solution(fwd_fname)
###############################################################################
# Handling depth bias
# -------------------
#
# The forward model solution is inherently biased toward superficial sources.
# When analyzing single conditions it is best to mitigate the depth bias
# somehow. There are several ways to do this:
#
# - :func:`mne.beamformer.make_lcmv` has a ``depth`` parameter that normalizes
# the forward model prior to computing the spatial filters. See the docstring
# for details.
# - Unit-noise gain beamformers handle depth bias by normalizing the
# weights of the spatial filter. Choose this by setting
# ``weight_norm='unit-noise-gain'``.
# - When computing the Neural activity index, the depth bias is handled by
# normalizing both the weights and the estimated noise (see
# :footcite:`VanVeenEtAl1997`). Choose this by setting ``weight_norm='nai'``.
#
# Note that when comparing conditions, the depth bias will cancel out and it is
# possible to set both parameters to ``None``.
#
#
# Compute the spatial filter
# --------------------------
# Now we can compute the spatial filter. We'll use a unit-noise gain beamformer
# to deal with depth bias, and will also optimize the orientation of the
# sources such that output power is maximized.
# This is achieved by setting ``pick_ori='max-power'``.
# This gives us one source estimate per source (i.e., voxel), which is known
# as a scalar beamformer.
filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori='max-power',
weight_norm='unit-noise-gain', rank=None)
# You can save the filter for later use with:
# filters.save('filters-lcmv.h5')
###############################################################################
# It is also possible to compute a vector beamformer, which gives back three
# estimates per voxel, corresponding to the three direction components of the
# source. This can be achieved by setting
# ``pick_ori='vector'`` and will yield a :class:`volume vector source estimate
# <mne.VolVectorSourceEstimate>`. So we will compute another set of filters
# using the vector beamformer approach:
filters_vec = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori='vector',
weight_norm='unit-noise-gain', rank=None)
# save a bit of memory
src = forward['src']
del forward
###############################################################################
# Apply the spatial filter
# ------------------------
# The spatial filter can be applied to different data types: raw, epochs,
# evoked data or the data covariance matrix to gain a static image of power.
# The function to apply the spatial filter to :class:`~mne.Evoked` data is
# :func:`~mne.beamformer.apply_lcmv` which is
# what we will use here. The other functions are
# :func:`~mne.beamformer.apply_lcmv_raw`,
# :func:`~mne.beamformer.apply_lcmv_epochs`, and
# :func:`~mne.beamformer.apply_lcmv_cov`.
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
stc_vec = apply_lcmv(evoked, filters_vec, max_ori_out='signed')
del filters, filters_vec
###############################################################################
# Visualize the reconstructed source activity
# -------------------------------------------
# We can visualize the source estimate in different ways, e.g. as a volume
# rendering, an overlay onto the MRI, or as an overlay onto a glass brain.
#
# The plots for the scalar beamformer show brain activity in the right temporal
# lobe around 100 ms post stimulus. This is expected given the left-ear
# auditory stimulation of the experiment.
lims = [0.3, 0.45, 0.6]
kwargs = dict(src=src, subject='sample', subjects_dir=subjects_dir,
initial_time=0.087, verbose=True)
###############################################################################
# On MRI slices (orthoview; 2D)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
stc.plot(mode='stat_map', clim=dict(kind='value', pos_lims=lims), **kwargs)
###############################################################################
# On MNI glass brain (orthoview; 2D)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
stc.plot(mode='glass_brain', clim=dict(kind='value', lims=lims), **kwargs)
###############################################################################
# Volumetric rendering (3D) with vectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# These plots can also be shown using a volumetric rendering via
# :meth:`~mne.VolVectorSourceEstimate.plot_3d`. Let's try visualizing the
# vector beamformer case. Here we get three source time courses out per voxel
# (one for each component of the dipole moment: x, y, and z), which appear
# as small vectors in the visualization (in the 2D plotters, only the
# magnitude can be shown):
# sphinx_gallery_thumbnail_number = 7
brain = stc_vec.plot_3d(
clim=dict(kind='value', lims=lims), hemi='both',
views=['coronal', 'sagittal', 'axial'], size=(800, 300),
view_layout='horizontal', show_traces=0.3, **kwargs)
###############################################################################
# Visualize the activity of the maximum voxel with all three components
# ---------------------------------------------------------------------
# We can also visualize all three components in the peak voxel. For this, we
# will first find the peak voxel and then plot the time courses of this voxel.
peak_vox, _ = stc_vec.get_peak(tmin=0.08, tmax=0.1, vert_as_index=True)
ori_labels = ['x', 'y', 'z']
fig, ax = plt.subplots(1)
for ori, label in zip(stc_vec.data[peak_vox, :, :], ori_labels):
ax.plot(stc_vec.times, ori, label='%s component' % label)
ax.legend(loc='lower right')
ax.set(title='Activity per orientation in the peak voxel', xlabel='Time (s)',
ylabel='Amplitude (a. u.)')
mne.viz.utils.plt_show()
del stc_vec
###############################################################################
# Morph the output to fsaverage
# -----------------------------
#
# We can also use volumetric morphing to get the data to fsaverage space. This
# is for example necessary when comparing activity across subjects. Here, we
# will use the scalar beamformer example.
# We pass a :class:`mne.SourceMorph` as the ``src`` argument to
# `mne.VolSourceEstimate.plot`. To save some computational load when applying
# the morph, we will crop the ``stc``:
fetch_fsaverage(subjects_dir) # ensure fsaverage src exists
fname_fs_src = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif'
src_fs = mne.read_source_spaces(fname_fs_src)
morph = mne.compute_source_morph(
src, subject_from='sample', src_to=src_fs, subjects_dir=subjects_dir,
niter_sdr=[10, 10, 5], niter_affine=[10, 10, 5], # just for speed
verbose=True)
stc_fs = morph.apply(stc)
del stc
stc_fs.plot(
src=src_fs, mode='stat_map', initial_time=0.085, subjects_dir=subjects_dir,
clim=dict(kind='value', pos_lims=lims), verbose=True)
###############################################################################
# References
# ----------
#
# .. footbibliography::
#
#
# .. LINKS
#
# .. _`inverted`: https://en.wikipedia.org/wiki/Invertible_matrix
| bsd-3-clause |
wanggang3333/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
Midafi/scikit-image | doc/examples/plot_circular_elliptical_hough_transform.py | 3 | 4760 | """
========================================
Circular and Elliptical Hough Transforms
========================================
The Hough transform in its simplest form is a `method to detect
straight lines <http://en.wikipedia.org/wiki/Hough_transform>`__
but it can also be used to detect circles or ellipses.
The algorithm assumes that the edge is detected and it is robust against
noise or missing points.
Circle detection
================
In the following example, the Hough transform is used to detect
coin positions and match their edges. We provide a range of
plausible radii. For each radius, two circles are extracted and
we finally keep the five most prominent candidates.
The result shows that coin positions are well-detected.
Algorithm overview
------------------
Given a black circle on a white background, we first guess its
radius (or a range of radii) to construct a new circle.
This circle is applied on each black pixel of the original picture
and the coordinates of this circle vote in an accumulator.
From this geometrical construction, the original circle center
position receives the highest score.
Note that the accumulator size is built to be larger than the
original picture in order to detect centers outside the frame.
Its size is extended by two times the larger radius.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, color
from skimage.transform import hough_circle
from skimage.feature import peak_local_max, canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
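# A tiny, hypothetical sketch of the voting step described above (not how
# scikit-image implements it): every edge pixel votes for all candidate
# centers lying on a circle of the guessed radius around it, so true centers
# collect the most votes. For simplicity votes falling outside the image are
# discarded here, whereas ``hough_circle`` enlarges the accumulator instead.
def _vote_single_radius(edge_img, radius):
    acc = np.zeros(edge_img.shape)
    for r, c in zip(*np.nonzero(edge_img)):
        rr, cc = circle_perimeter(r, c, radius)
        ok = (rr >= 0) & (rr < acc.shape[0]) & (cc >= 0) & (cc < acc.shape[1])
        acc[rr[ok], cc[ok]] += 1
    return acc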
# Load picture and detect edges
image = img_as_ubyte(data.coins()[0:95, 70:370])
edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(5, 2))
# Detect two radii
hough_radii = np.arange(15, 30, 2)
hough_res = hough_circle(edges, hough_radii)
centers = []
accums = []
radii = []
for radius, h in zip(hough_radii, hough_res):
# For each radius, extract two circles
num_peaks = 2
peaks = peak_local_max(h, num_peaks=num_peaks)
centers.extend(peaks)
accums.extend(h[peaks[:, 0], peaks[:, 1]])
radii.extend([radius] * num_peaks)
# Draw the most prominent 5 circles
image = color.gray2rgb(image)
for idx in np.argsort(accums)[::-1][:5]:
center_x, center_y = centers[idx]
radius = radii[idx]
cx, cy = circle_perimeter(center_y, center_x, radius)
image[cy, cx] = (220, 20, 20)
ax.imshow(image, cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure
Ellipse detection
=================
In this second example, the aim is to detect the edge of a coffee cup.
Basically, this is a projection of a circle, i.e. an ellipse.
The problem to solve is much more difficult because five parameters have to be
determined, instead of three for circles.
Algorithm overview
------------------
The algorithm takes two different points belonging to the ellipse and assumes
that they define the major axis. A loop over all the other points determines
how well an ellipse passes through them. A good match corresponds to high
accumulator values.
A full description of the algorithm can be found in reference [1]_.
References
----------
.. [1] Xie, Yonghong, and Qiang Ji. "A new efficient ellipse detection
method." Pattern Recognition, 2002. Proceedings. 16th International
Conference on. Vol. 2. IEEE, 2002
"""
import matplotlib.pyplot as plt
from skimage import data, color
from skimage.feature import canny
from skimage.transform import hough_ellipse
from skimage.draw import ellipse_perimeter
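# A rough sketch of the core geometric step from the reference above
# (hypothetical code, not the scikit-image implementation): given two edge
# points assumed to be the major-axis endpoints, every third point yields a
# candidate minor half-axis b; the candidates are accumulated in a histogram
# and a strong peak means many points agree on the same ellipse.
def _minor_axis_candidate(p1, p2, p):
    p1, p2, p = (np.asarray(v, dtype=float) for v in (p1, p2, p))
    center = (p1 + p2) / 2.
    a = np.linalg.norm(p1 - p2) / 2.          # semi-major axis length
    d = np.linalg.norm(p - center)            # third point to center
    f = np.linalg.norm(p - p2)                # third point to one endpoint
    cos_tau = (a ** 2 + d ** 2 - f ** 2) / (2. * a * d)
    sin2_tau = 1. - cos_tau ** 2
    return np.sqrt(a ** 2 * d ** 2 * sin2_tau / (a ** 2 - d ** 2 * cos_tau ** 2))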
# Load picture, convert to grayscale and detect edges
image_rgb = data.coffee()[0:220, 160:420]
image_gray = color.rgb2gray(image_rgb)
edges = canny(image_gray, sigma=2.0,
low_threshold=0.55, high_threshold=0.8)
# Perform a Hough Transform
# The accuracy corresponds to the bin size of a major axis.
# The value is chosen in order to get a single high accumulator.
# The threshold eliminates low accumulators
result = hough_ellipse(edges, accuracy=20, threshold=250,
min_size=100, max_size=120)
result.sort(order='accumulator')
# Estimated parameters for the ellipse
best = list(result[-1])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]
# Draw the ellipse on the original image
cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
image_rgb[cy, cx] = (0, 0, 255)
# Draw the edge (white) and the resulting ellipse (red)
edges = color.gray2rgb(edges)
edges[cy, cx] = (250, 0, 0)
fig2, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4))
ax1.set_title('Original picture')
ax1.imshow(image_rgb)
ax2.set_title('Edge (white) and result (red)')
ax2.imshow(edges)
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 69 | 3894 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
afronski/playground-notes | introduction-to-big-data-with-apache-spark/solutions/lab2_apache_log_student.py | 1 | 38489 |
# coding: utf-8
# version 1.0.1
# # + 
# # **Web Server Log Analysis with Apache Spark**
#
# ####This lab will demonstrate how easy it is to perform web server log analysis with Apache Spark.
#
# ####Server log analysis is an ideal use case for Spark. It's a very large, common data source and contains a rich set of information. Spark allows you to store your logs in files on disk cheaply, while still providing a quick and simple way to perform data analysis on them. This homework will show you how to use Apache Spark on real-world text-based production logs and fully harness the power of that data. Log data comes from many sources, such as web, file, and compute servers, application logs, user-generated content, and can be used for monitoring servers, improving business and customer intelligence, building recommendation systems, fraud detection, and much more.
# ### How to complete this assignment
#
# ####This assignment is broken up into sections with bite-sized examples for demonstrating Spark functionality for log processing. For each problem, you should start by thinking about the algorithm that you will use to *efficiently* process the log in a parallel, distributed manner. This means using the various [RDD](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD) operations along with [`lambda` functions](https://docs.python.org/2/tutorial/controlflow.html#lambda-expressions) that are applied at each worker.
#
# ####This assignment consists of 4 parts:
# #### *Part 1*: Apache Web Server Log file format
# #### *Part 2*: Sample Analyses on the Web Server Log File
# #### *Part 3*: Analyzing Web Server Log File
# #### *Part 4*: Exploring 404 Response Codes
# ### **Part 1: Apache Web Server Log file format**
# ####The log files that we use for this assignment are in the [Apache Common Log Format (CLF)](http://httpd.apache.org/docs/1.3/logs.html#common). The log file entries produced in CLF will look something like this:
# `127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /images/launch-logo.gif HTTP/1.0" 200 1839`
#
# ####Each part of this log entry is described below.
# * `127.0.0.1`
# ####This is the IP address (or host name, if available) of the client (remote host) which made the request to the server.
#
# * `-`
# ####The "hyphen" in the output indicates that the requested piece of information (user identity from remote machine) is not available.
#
# * `-`
# ####The "hyphen" in the output indicates that the requested piece of information (user identity from local logon) is not available.
#
# * `[01/Aug/1995:00:00:01 -0400]`
# ####The time that the server finished processing the request. The format is:
# `[day/month/year:hour:minute:second timezone]`
# * ####day = 2 digits
# * ####month = 3 letters
# * ####year = 4 digits
# * ####hour = 2 digits
# * ####minute = 2 digits
# * ####second = 2 digits
# * ####zone = (\+ | \-) 4 digits
#
# * `"GET /images/launch-logo.gif HTTP/1.0"`
# ####This is the first line of the request string from the client. It consists of a three components: the request method (e.g., `GET`, `POST`, etc.), the endpoint (a [Uniform Resource Identifier](http://en.wikipedia.org/wiki/Uniform_resource_identifier)), and the client protocol version.
#
# * `200`
# ####This is the status code that the server sends back to the client. This information is very valuable, because it reveals whether the request resulted in a successful response (codes beginning in 2), a redirection (codes beginning in 3), an error caused by the client (codes beginning in 4), or an error in the server (codes beginning in 5). The full list of possible status codes can be found in the HTTP specification ([RFC 2616](https://www.ietf.org/rfc/rfc2616.txt) section 10).
#
# * `1839`
# ####The last entry indicates the size of the object returned to the client, not including the response headers. If no content was returned to the client, this value will be "-" (or sometimes 0).
#
# ####Note that log files contain information supplied directly by the client, without escaping. Therefore, it is possible for malicious clients to insert control-characters in the log files, *so care must be taken in dealing with raw logs.*
#
# ### NASA-HTTP Web Server Log
# ####For this assignment, we will use a data set from NASA Kennedy Space Center WWW server in Florida. The full data set is freely available (http://ita.ee.lbl.gov/html/contrib/NASA-HTTP.html) and contains two month's of all HTTP requests. We are using a subset that only contains several days worth of requests.
# ### **(1a) Parsing Each Log Line**
# ####Using the CLF as defined above, we create a regular expression pattern to extract the nine fields of the log line using the Python regular expression [`search` function](https://docs.python.org/2/library/re.html#regular-expression-objects). The function returns a pair consisting of a Row object and 1. If the log line fails to match the regular expression, the function returns a pair consisting of the log line string and 0. A '-' value in the content size field is cleaned up by substituting it with 0. The function converts the log line's date string into a Python `datetime` object using the given `parse_apache_time` function.
# In[1]:
import re
import datetime
from pyspark.sql import Row
month_map = {'Jan': 1, 'Feb': 2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7,
'Aug':8, 'Sep': 9, 'Oct':10, 'Nov': 11, 'Dec': 12}
def parse_apache_time(s):
""" Convert Apache time format into a Python datetime object
Args:
s (str): date and time in Apache time format
Returns:
datetime: datetime object (ignore timezone for now)
"""
return datetime.datetime(int(s[7:11]),
month_map[s[3:6]],
int(s[0:2]),
int(s[12:14]),
int(s[15:17]),
int(s[18:20]))
def parseApacheLogLine(logline):
""" Parse a line in the Apache Common Log format
Args:
logline (str): a line of text in the Apache Common Log format
Returns:
tuple: either a dictionary containing the parts of the Apache Access Log and 1,
or the original invalid log line and 0
"""
match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
if match is None:
return (logline, 0)
size_field = match.group(9)
if size_field == '-':
size = long(0)
else:
size = long(match.group(9))
return (Row(
host = match.group(1),
client_identd = match.group(2),
user_id = match.group(3),
date_time = parse_apache_time(match.group(4)),
method = match.group(5),
endpoint = match.group(6),
protocol = match.group(7),
response_code = int(match.group(8)),
content_size = size
), 1)
# In[2]:
# A regular expression pattern to extract fields from the log line
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)" (\d{3}) (\S+)'
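# An illustrative sketch (not part of the original lab): matching the pattern against the correctly parsed example line shows how the nine capture groups line up with the CLF fields described above.
example_line = '127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /images/launch-logo.gif HTTP/1.0" 200 1839'
example_match = re.search(APACHE_ACCESS_LOG_PATTERN, example_line)
print example_match.groups()
# ('127.0.0.1', '-', '-', '01/Aug/1995:00:00:01 -0400', 'GET', '/images/launch-logo.gif', 'HTTP/1.0', '200', '1839')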
# ### **(1b) Configuration and Initial RDD Creation**
# ####We are ready to specify the input log file and create an RDD containing the parsed log file data. The log file has already been downloaded for you.
#
# ####To create the primary RDD that we'll use in the rest of this assignment, we first load the text file using [`sc.textFile(logFile)`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext.textFile) to convert each line of the file into an element in an RDD.
# ####Next, we use [`map(parseApacheLogLine)`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.map) to apply the parse function to each element (that is, a line from the log file) in the RDD, turning each line into a pair consisting of a [`Row` object](http://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.Row) and a flag indicating whether parsing succeeded.
# ####Finally, we cache the RDD in memory since we'll use it throughout this notebook.
# In[4]:
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
def parseLogs():
""" Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid logline: %d' % failed_logs.count()
for line in failed_logs.take(20):
print 'Invalid logline: %s' % line
print 'Read %d lines, successfully parsed %d lines, failed to parse %d lines' % (parsed_logs.count(), access_logs.count(), failed_logs.count())
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
# ### **(1c) Data Cleaning**
# #### Notice that a large number of log lines failed to parse. Examine the sample of invalid lines and compare them to a correctly parsed line; an example is included below. Based on your observations, alter the `APACHE_ACCESS_LOG_PATTERN` regular expression below so that the failed lines parse correctly, and press `Shift-Enter` to rerun `parseLogs()`.
#
# `127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /images/launch-logo.gif HTTP/1.0" 200 1839`
#
# #### If you are not familiar with the Python regular expression [`search` function](https://docs.python.org/2/library/re.html#regular-expression-objects), now would be a good time to review the [documentation](https://developers.google.com/edu/python/regular-expressions). One tip that might be useful is to use an online tester like http://pythex.org or http://www.pythonregex.com. To use it, copy and paste the regular expression string below (located between the single quotes ') and test it against one of the 'Invalid logline' entries above.
# In[6]:
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+) \[([\w:/]+\s[\+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)\s*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
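# The only functional change from the original pattern is the extra \s* before the closing
# quote, which tolerates stray whitespace at the end of the request field. As a sketch, a
# hypothetical line of that shape (illustrative only, not taken from the NASA data set) now
# parses cleanly:
print parseApacheLogLine('127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /images/launch-logo.gif HTTP/1.0 " 200 1839')[1]  # prints 1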
# In[7]:
# TEST Data cleaning (1c)
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
# ### **Part 2: Sample Analyses on the Web Server Log File**
#
# ####Now that we have an RDD containing the log file as a set of Row objects, we can perform various analyses.
#
# #### **(2a) Example: Content Size Statistics**
#
# ####Let's compute some statistics about the sizes of content being returned by the web server. In particular, we'd like to know what are the average, minimum, and maximum content sizes.
#
# ####We can compute the statistics by applying a `map` to the `access_logs` RDD. The `lambda` function we want for the map extracts the `content_size` field from each Row. The map produces a new RDD containing only the `content_sizes` (one element for each Row object in the `access_logs` RDD). To compute the minimum and maximum statistics, we can use the [`min()`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.min) and [`max()`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.max) functions on the new RDD. We can compute the average statistic by using the [`reduce`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.reduce) function with a `lambda` function that sums the two inputs, which represent two elements from the new RDD that are being reduced together. The result of the `reduce()` is the total content size from the log, which we then divide by the number of requests as determined by the [`count()`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.count) function on the new RDD.
# In[8]:
# Calculate statistics based on the content size.
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: %i, Min: %i, Max: %s' % (
content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
content_sizes.min(),
content_sizes.max())
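# A compact alternative sketch: the RDD stats() helper computes the same summary in a single pass
# (assuming it is available in the version of Spark used here).
content_stats = content_sizes.stats()
print 'Content Size Count: %d, Mean: %f, Min: %f, Max: %f' % (content_stats.count(), content_stats.mean(), content_stats.min(), content_stats.max())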
# #### **(2b) Example: Response Code Analysis**
# ####Next, let's look at the response codes that appear in the log. As with the content size analysis, first we create a new RDD by using a `lambda` function to extract the `response_code` field from the `access_logs` RDD. The difference here is that we will use a [pair tuple](https://docs.python.org/2/tutorial/datastructures.html?highlight=tuple#tuples-and-sequences) instead of just the field itself. Using a pair tuple consisting of the response code and 1 will let us count how many records have a particular response code. Using the new RDD, we perform a [`reduceByKey`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.reduceByKey) function. `reduceByKey` performs a reduce on a per-key basis by applying the `lambda` function pairwise to values that share the same key. We use the simple `lambda` function of adding the two values. Then, we cache the resulting RDD and create a list by using the [`take`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.take) function.
# In[9]:
# Response Code to Count
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found %d response codes' % len(responseCodeToCountList)
print 'Response Code Counts: %s' % responseCodeToCountList
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
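# An equivalent sketch using countByValue(), which returns the per-code counts directly to the
# driver as a dict; the RDD-based version above is kept because later cells reuse responseCodeToCount.
responseCodeCountsViaCountByValue = access_logs.map(lambda log: log.response_code).countByValue()
print 'Response Code Counts via countByValue(): %s' % dict(responseCodeCountsViaCountByValue)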
# #### **(2c) Example: Response Code Graphing with `matplotlib`**
# ####Now, let's visualize the results from the last example using [`matplotlib`](http://matplotlib.org/). First we need to extract the labels and fractions for the graph. We do this with two separate `map` functions, each with a `lambda` function. The first `map` function extracts a list of the response code values, and the second extracts a list of the per-response-code counts divided by the total size of the access logs. Next, we create a figure with the `figure()` constructor and use the `pie()` method to create the pie plot.
# In[10]:
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
# In[11]:
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
str: formated string label; if the slice is too small to fit, returns an empty string for label
"""
return '' if value < 7 else '%.0f%%' % value
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
        text.set_text('') # If the slice is too small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
pass
# #### **(2d) Example: Frequent Hosts**
# ####Let's look at hosts that have accessed the server multiple times (e.g., more than ten times). As with the response code analysis in (2b), first we create a new RDD by using a `lambda` function to extract the `host` field from the `access_logs` RDD using a pair tuple consisting of the host and 1 which will let us count how many records were created by a particular host's request. Using the new RDD, we perform a `reduceByKey` function with a `lambda` function that adds the two values. We then filter the result based on the count of accesses by each host (the second element of each pair) being greater than ten. Next, we extract the host name by performing a `map` with a `lambda` function that returns the first element of each pair. Finally, we extract 20 elements from the resulting RDD - *note that the choice of which elements are returned is not guaranteed to be deterministic.*
# In[12]:
# Any host that has accessed the server more than 10 times.
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more than 10 times: %s' % hostsPick20
# An example: [u'204.120.34.185', u'204.243.249.9', u'slip1-32.acs.ohio-state.edu', u'lapdog-14.baylor.edu', u'199.77.67.3', u'gs1.cs.ttu.edu', u'haskell.limbex.com', u'alfred.uib.no', u'146.129.66.31', u'manaus.bologna.maraut.it', u'dialup98-110.swipnet.se', u'slip-ppp02.feldspar.com', u'ad03-053.compuserve.com', u'srawlin.opsys.nwa.com', u'199.202.200.52', u'ix-den7-23.ix.netcom.com', u'151.99.247.114', u'w20-575-104.mit.edu', u'205.25.227.20', u'ns.rmc.com']
# #### **(2e) Example: Visualizing Endpoints**
# ####Now, let's visualize the number of hits to endpoints (URIs) in the log. To perform this task, we first create a new RDD by using a `lambda` function to extract the `endpoint` field from the `access_logs` RDD as a pair tuple consisting of the endpoint and 1, which lets us count how many times each endpoint was requested. Using the new RDD, we perform a `reduceByKey` function with a `lambda` function that adds the two values. We then cache the results.
#
# ####Next we visualize the results using `matplotlib`. We previously imported the `matplotlib.pyplot` library, so we do not need to import it again. We perform two separate `map` functions with `lambda` functions. The first `map` function extracts a list of endpoint values, and the second `map` function extracts a list of the visits-per-endpoint values. Next, we create a figure with the `figure()` constructor, set various features of the plot (axis limits, grid lines, and labels), and use the `plot()` method to create the line plot.
# In[13]:
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
pass
# #### **(2f) Example: Top Endpoints**
# ####For the final example, we'll look at the top endpoints (URIs) in the log. To determine them, we first create a new RDD by using a `lambda` function to extract the `endpoint` field from the `access_logs` RDD as a pair tuple consisting of the endpoint and 1, which lets us count how many times each endpoint was requested. Using the new RDD, we perform a `reduceByKey` function with a `lambda` function that adds the two values. We then extract the top ten endpoints by performing a [`takeOrdered`](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.takeOrdered) with a value of 10 and a `lambda` function that negates the count (the second element of each pair), so the endpoints with the most hits come first in the resulting list.
# In[14]:
# Top Endpoints
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: %s' % topEndpoints
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20292)], 'incorrect Top Ten Endpoints'
# ### **Part 3: Analyzing Web Server Log File**
#
# ####Now it is your turn to perform analyses on web server log files.
# #### **(3a) Exercise: Top Ten Error Endpoints**
# ####What are the top ten endpoints which did not have return code 200? Create a sorted list containing the top ten endpoints and the number of times that they were accessed with a non-200 return code.
#
# ####Think about the steps that you need to perform to determine which endpoints did not have a 200 return code, how you will uniquely count those endpoints, and sort the list.
#
# ####You might want to refer back to the previous Lab (Lab 1 Word Count) for insights.
# In[15]:
not200 = access_logs.filter(lambda log: log.response_code != 200)
endpointCountPairTuple = not200.map(lambda log: (log.endpoint, 1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a, b : a + b)
topTenErrURLs = endpointSum.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten failed URLs: %s' % topTenErrURLs
# In[16]:
# TEST Top ten error endpoints (3a)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
# #### **(3b) Exercise: Number of Unique Hosts**
# ####How many unique hosts are there in the entire log?
#
# ####Think about the steps that you need to perform to count the number of different hosts in the log.
# In[19]:
hosts = access_logs.map(lambda log: log.host)
uniqueHosts = hosts.countByValue()
uniqueHostCount = len(uniqueHosts)
print 'Unique hosts: %d' % uniqueHostCount
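# An alternative sketch that keeps the computation distributed instead of collecting a per-host
# dictionary onto the driver:
uniqueHostCountViaDistinct = access_logs.map(lambda log: log.host).distinct().count()
print 'Unique hosts via distinct().count(): %d' % uniqueHostCountViaDistinct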
# In[20]:
# TEST Number of unique hosts (3b)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
# #### **(3c) Exercise: Number of Unique Daily Hosts**
# ####For an advanced exercise, let's determine the number of unique hosts in the entire log on a day-by-day basis. This computation will give us counts of the number of unique daily hosts. We'd like a list sorted by increasing day of the month which includes the day of the month and the associated number of unique hosts for that day. Make sure you cache the resulting RDD `dailyHosts` so that we can reuse it in the next exercise.
#
# ####Think about the steps that you need to perform to count the number of different hosts that make requests *each* day.
# ####*Since the log only covers a single month, you can ignore the month.*
# In[74]:
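# Helper functions for combineByKey: `empty` acts as the createCombiner (start a per-day set from
# the first host seen), `add` as the mergeValue function (add another host observed on the same
# day), and `merge` as the mergeCombiners function (union sets built on different partitions).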
def empty(a):
return set([ a ])
def add(acc, a):
acc.update([ a ])
return acc
def merge(acc1, acc2):
return acc1.union(acc2)
dayToHostPairTuple = access_logs.map(lambda log: (log.date_time.day, log.host))
dayGroupedHosts = dayToHostPairTuple.combineByKey(empty, add, merge)
dayHostCount = dayGroupedHosts.map(lambda (day, hosts): (day, len(hosts)))
dailyHosts = dayHostCount.sortByKey().cache()
dailyHostsList = dailyHosts.take(30)
print 'Unique hosts per day: %s' % dailyHostsList
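# An equivalent sketch using distinct() instead of building per-day host sets with combineByKey;
# it trades the in-memory sets for an extra shuffle:
dailyHostsViaDistinct = (access_logs
                         .map(lambda log: (log.date_time.day, log.host))
                         .distinct()
                         .map(lambda (day, host): (day, 1))
                         .reduceByKey(lambda a, b: a + b)
                         .sortByKey())
print 'Unique hosts per day via distinct(): %s' % dailyHostsViaDistinct.take(5)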
# In[75]:
# TEST Number of unique daily hosts (3c)
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, 4385), (18, 4168), (19, 2550), (20, 2560), (21, 4134), (22, 4456)], 'incorrect dailyHostsList')
Test.assertTrue(dailyHosts.is_cached, 'incorrect dailyHosts.is_cached')
# #### **(3d) Exercise: Visualizing the Number of Unique Daily Hosts**
# ####Using the results from the previous exercise, use `matplotlib` to plot a "Line" graph of the unique hosts requests by day.
# #### `daysWithHosts` should be a list of days and `hosts` should be a list of the number of unique hosts for each corresponding day.
# #### * How could you convert an RDD into a list? See the [`collect()` method](http://spark.apache.org/docs/latest/api/python/pyspark.html?highlight=collect#pyspark.RDD.collect)*
# In[50]:
daysWithHosts = dailyHosts.map(lambda (day, hosts): day).collect()
hosts = dailyHosts.map(lambda (day, hosts): hosts).collect()
# In[51]:
# TEST Visualizing unique daily hosts (3d)
test_days = range(1, 23)
test_days.remove(2)
Test.assertEquals(daysWithHosts, test_days, 'incorrect days')
Test.assertEquals(hosts, [2582, 3222, 4190, 2502, 2537, 4106, 4406, 4317, 4523, 4346, 2864, 2650, 4454, 4214, 4340, 4385, 4168, 2550, 2560, 4134, 4456], 'incorrect hosts')
# In[52]:
fig = plt.figure(figsize=(8,4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts)+500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
pass
# #### **(3e) Exercise: Average Number of Daily Requests per Hosts**
# ####Next, let's determine the average number of requests on a day-by-day basis. We'd like a list by increasing day of the month and the associated average number of requests per host for that day. Make sure you cache the resulting RDD `avgDailyReqPerHost` so that we can reuse it in the next exercise.
# ####To compute the average number of requests per host, get the total number of requests across all hosts and divide that by the number of unique hosts.
# ####*Since the log only covers a single month, you can skip checking for the month.*
# ####*Also to keep it simple, when calculating the approximate average use the integer value - you do not need to upcast to float*
# In[80]:
reqsPerDay = access_logs.map(lambda log: (log.date_time.day, 1)).reduceByKey(lambda a, b: a + b).sortByKey()
groupedByDay = reqsPerDay.join(dailyHosts)
avgDailyReqPerHost = groupedByDay.map(lambda (day, (r, h)): (day, r / h)).sortByKey().cache()
avgDailyReqPerHostList = avgDailyReqPerHost.take(30)
print 'Average number of daily requests per Hosts is %s' % avgDailyReqPerHostList
# In[81]:
# TEST Average number of daily requests per hosts (3e)
Test.assertEquals(avgDailyReqPerHostList, [(1, 13), (3, 12), (4, 14), (5, 12), (6, 12), (7, 13), (8, 13), (9, 14), (10, 13), (11, 14), (12, 13), (13, 13), (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (19, 12), (20, 12), (21, 13), (22, 12)], 'incorrect avgDailyReqPerHostList')
Test.assertTrue(avgDailyReqPerHost.is_cached, 'incorrect avgDailyReqPerHost.is_cached')
# #### **(3f) Exercise: Visualizing the Average Daily Requests per Unique Host**
# ####Using the result `avgDailyReqPerHost` from the previous exercise, use `matplotlib` to plot a "Line" graph of the average daily requests per unique host by day.
# #### `daysWithAvg` should be a list of days and `avgs` should be a list of average daily requests per unique host for each corresponding day.
# In[83]:
daysWithAvg = avgDailyReqPerHost.map(lambda (day, r): day).collect()
avgs = avgDailyReqPerHost.map(lambda (day, r): r).collect()
# In[84]:
# TEST Average Daily Requests per Unique Host (3f)
Test.assertEquals(daysWithAvg, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect days')
Test.assertEquals(avgs, [13, 12, 14, 12, 12, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 12], 'incorrect avgs')
# In[85]:
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs)+2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
pass
# ### **Part 4: Exploring 404 Response Codes**
#
# ####Let's drill down and explore the error 404 response code records. 404 errors are returned when an endpoint is not found by the server (i.e., a missing page or object).
# #### **(4a) Exercise: Counting 404 Response Codes**
# #### Create an RDD containing only log records with a 404 response code. Make sure you `cache()` the RDD `badRecords` as we will use it in the rest of this exercise.
#
# #### How many 404 records are in the log?
# In[88]:
badRecords = (access_logs
.filter(lambda log: log.response_code == 404)
.cache())
print 'Found %d 404 URLs' % badRecords.count()
# In[89]:
# TEST Counting 404 (4a)
Test.assertEquals(badRecords.count(), 6185, 'incorrect badRecords.count()')
Test.assertTrue(badRecords.is_cached, 'incorrect badRecords.is_cached')
# #### **(4b) Exercise: Listing 404 Response Code Records**
# ####Using the RDD containing only log records with a 404 response code that you cached in part (4a), print out a list up to 40 **distinct** endpoints that generate 404 errors - *no endpoint should appear more than once in your list.*
# In[93]:
badEndpoints = badRecords.map(lambda log: log.endpoint)
badUniqueEndpoints = badEndpoints.distinct()
badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)
print '404 URLS: %s' % badUniqueEndpointsPick40
# In[94]:
# TEST Listing 404 records (4b)
badUniqueEndpointsSet40 = set(badUniqueEndpointsPick40)
Test.assertEquals(len(badUniqueEndpointsSet40), 40, 'badUniqueEndpointsPick40 not distinct')
# #### **(4c) Exercise: Listing the Top Twenty 404 Response Code Endpoints**
# ####Using the RDD containing only log records with a 404 response code that you cached in part (4a), print out a list of the top twenty endpoints that generate the most 404 errors.
# ####*Remember, top endpoints should be in sorted order*
# In[96]:
badEndpointsCountPairTuple = badRecords.map(lambda log: (log.endpoint, 1))
badEndpointsSum = badEndpointsCountPairTuple.reduceByKey(lambda a, b: a + b)
badEndpointsTop20 = badEndpointsSum.takeOrdered(20, lambda s: -1 * s[1])
print 'Top Twenty 404 URLs: %s' % badEndpointsTop20
# In[97]:
# TEST Top twenty 404 URLs (4c)
Test.assertEquals(badEndpointsTop20, [(u'/pub/winvn/readme.txt', 633), (u'/pub/winvn/release.txt', 494), (u'/shuttle/missions/STS-69/mission-STS-69.html', 431), (u'/images/nasa-logo.gif', 319), (u'/elv/DELTA/uncons.htm', 178), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156), (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146), (u'/images/crawlerway-logo.gif', 120), (u'/://spacelink.msfc.nasa.gov', 117), (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100), (u'/history/apollo/a-001/a-001-patch-small.gif', 97), (u'/images/Nasa-logo.gif', 85), (u'/shuttle/resources/orbiters/atlantis.gif', 64), (u'/history/apollo/images/little-joe.jpg', 62), (u'/images/lf-logo.gif', 59), (u'/shuttle/resources/orbiters/discovery.gif', 56), (u'/shuttle/resources/orbiters/challenger.gif', 54), (u'/robots.txt', 53), (u'/elv/new01.gif>', 43), (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)], 'incorrect badEndpointsTop20')
# #### **(4d) Exercise: Listing the Top Twenty-five 404 Response Code Hosts**
# ####Instead of looking at the endpoints that generated 404 errors, let's look at the hosts that encountered 404 errors. Using the RDD containing only log records with a 404 response code that you cached in part (4a), print out a list of the top twenty-five hosts that generate the most 404 errors.
# In[98]:
errHostsCountPairTuple = badRecords.map(lambda log: (log.host, 1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda a, b: a + b)
errHostsTop25 = errHostsSum.takeOrdered(25, lambda s: -1 * s[1])
print 'Top 25 hosts that generated errors: %s' % errHostsTop25
# In[99]:
# TEST Top twenty-five 404 response code hosts (4d)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set([(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39), (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37), (u'ts8-1.westwood.ts.ucla.edu', 37), (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33), (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27), (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25), (u'203.13.168.24', 25), (u'203.13.168.17', 25), (u'internet-gw.watson.ibm.com', 24), (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23), (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22), (u'slip145-189.ut.nl.ibm.net', 22), (u'198.40.25.102.sap2.artic.edu', 21), (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20), (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19), (u'tigger.nashscene.com', 19)])), 0, 'incorrect errHostsTop25')
# #### **(4e) Exercise: Listing 404 Response Codes per Day**
# ####Let's explore the 404 records temporally. Break down the 404 requests by day (`cache()` the RDD `errDateSorted`) and get the daily counts sorted by day as a list.
# ####*Since the log only covers a single month, you can ignore the month in your checks.*
# In[103]:
errDateCountPairTuple = badRecords.map(lambda log: (log.date_time.day, 1))
errDateSum = errDateCountPairTuple.reduceByKey(lambda a,b : a + b)
errDateSorted = errDateSum.sortByKey().cache()
errByDate = errDateSorted.collect()
print '404 Errors by day: %s' % errByDate
# In[104]:
# TEST 404 response codes per day (4e)
Test.assertEquals(errByDate, [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532), (8, 381), (9, 279), (10, 314), (11, 263), (12, 195), (13, 216), (14, 287), (15, 326), (16, 258), (17, 269), (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)], 'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
# #### **(4f) Exercise: Visualizing the 404 Response Codes by Day**
# ####Using the results from the previous exercise, use `matplotlib` to plot a "Line" or "Bar" graph of the 404 response codes by day.
# In[105]:
daysWithErrors404 = errDateSorted.map(lambda (day, num): day).collect()
errors404ByDay = errDateSorted.map(lambda (day, num): num).collect()
# In[106]:
# TEST Visualizing the 404 Response Codes by Day (4f)
Test.assertEquals(daysWithErrors404, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay, [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216, 287, 326, 258, 269, 255, 207, 312, 305, 288], 'incorrect errors404ByDay')
# In[107]:
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
# #### **(4g) Exercise: Top Five Days for 404 Response Codes **
# ####Using the RDD `errDateSorted` you cached in part (4e), what are the top five days for 404 response codes and the corresponding counts of 404 response codes?
# In[110]:
topErrDate = errDateSorted.takeOrdered(5, lambda s: s[1] * -1)
print 'Top Five dates for 404 requests: %s' % topErrDate
# In[111]:
# TEST Five dates for 404 requests (4g)
Test.assertEquals(topErrDate, [(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
# #### **(4h) Exercise: Hourly 404 Response Codes**
# ####Using the RDD `badRecords` you cached in part (4a), create an RDD containing the number of requests that had a 404 return code for each hour of the day (midnight starts at 0), sorted by increasing hour. Cache the resulting RDD `hourRecordsSorted` and print it as a list.
# In[112]:
hourCountPairTuple = badRecords.map(lambda log: (log.date_time.hour, 1))
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda a,b: a + b)
hourRecordsSorted = hourRecordsSum.sortByKey().cache()
errHourList = hourRecordsSorted.collect()
print 'Top hours for 404 requests: %s' % errHourList
# In[113]:
# TEST Hourly 404 response codes (4h)
Test.assertEquals(errHourList, [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95), (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263), (12, 438), (13, 397), (14, 318), (15, 347), (16, 373), (17, 330), (18, 268), (19, 269), (20, 270), (21, 241), (22, 234), (23, 272)], 'incorrect errHourList')
Test.assertTrue(hourRecordsSorted.is_cached, 'incorrect hourRecordsSorted.is_cached')
# #### **(4i) Exercise: Visualizing the 404 Response Codes by Hour**
# ####Using the results from the previous exercise, use `matplotlib` to plot a "Line" or "Bar" graph of the 404 response codes by hour.
# In[114]:
hoursWithErrors404 = hourRecordsSorted.map(lambda (day, num): day).collect()
errors404ByHours = hourRecordsSorted.map(lambda (day, num): num).collect()
# In[115]:
# TEST Visualizing the 404 Response Codes by Hour (4i)
Test.assertEquals(hoursWithErrors404, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], 'incorrect hoursWithErrors404')
Test.assertEquals(errors404ByHours, [175, 171, 422, 272, 102, 95, 93, 122, 199, 185, 329, 263, 438, 397, 318, 347, 373, 330, 268, 269, 270, 241, 234, 272], 'incorrect errors404ByHours')
# In[116]:
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours)
pass
| mit |
josherick/bokeh | bokeh/protocol.py | 37 | 3282 | from __future__ import absolute_import
import json
import logging
import datetime as dt
import calendar
import decimal
from .util.serialization import transform_series, transform_array
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
try:
from dateutil.relativedelta import relativedelta
is_dateutil = True
except ImportError:
is_dateutil = False
from .settings import settings
log = logging.getLogger(__name__)
class BokehJSONEncoder(json.JSONEncoder):
def transform_python_types(self, obj):
"""handle special scalars, default to default json encoder
"""
# Pandas Timestamp
if is_pandas and isinstance(obj, pd.tslib.Timestamp):
return obj.value / 10**6.0 #nanosecond to millisecond
elif np.issubdtype(type(obj), np.float):
return float(obj)
elif np.issubdtype(type(obj), np.int):
return int(obj)
elif np.issubdtype(type(obj), np.bool_):
return bool(obj)
# Datetime
# datetime is a subclass of date.
elif isinstance(obj, dt.datetime):
return calendar.timegm(obj.timetuple()) * 1000. + obj.microsecond / 1000.
# Date
elif isinstance(obj, dt.date):
return calendar.timegm(obj.timetuple()) * 1000.
# Numpy datetime64
elif isinstance(obj, np.datetime64):
epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')
return (epoch_delta / np.timedelta64(1, 'ms'))
# Time
elif isinstance(obj, dt.time):
return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
elif is_dateutil and isinstance(obj, relativedelta):
return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
# Decimal
elif isinstance(obj, decimal.Decimal):
return float(obj)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
#argh! local import!
from .plot_object import PlotObject
from .properties import HasProps
from .colors import Color
## array types
if is_pandas and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj)
elif isinstance(obj, np.ndarray):
return transform_array(obj)
elif isinstance(obj, PlotObject):
return obj.ref
elif isinstance(obj, HasProps):
return obj.changed_properties_with_values()
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
def serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):
if settings.pretty(False):
kwargs["indent"] = 4
return json.dumps(obj, cls=encoder, allow_nan=False, **kwargs)
deserialize_json = json.loads
serialize_web = serialize_json
deserialize_web = deserialize_json
def status_obj(status):
return {'msgtype': 'status',
'status': status}
def error_obj(error_msg):
return {
'msgtype': 'error',
'error_msg': error_msg}
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/pandas/io/common.py | 1 | 4208 | """Common IO api utilities"""
import sys
import zipfile
from contextlib import contextmanager, closing
from pandas.compat import StringIO
from pandas import compat
if compat.PY3:
from urllib.request import urlopen, pathname2url
_urlopen = urlopen
from urllib.parse import urlparse as parse_url
import urllib.parse as compat_parse
from urllib.parse import (uses_relative, uses_netloc, uses_params,
urlencode, urljoin)
from urllib.error import URLError
from http.client import HTTPException
else:
from urllib2 import urlopen as _urlopen
from urllib import urlencode, pathname2url
from urlparse import urlparse as parse_url
from urlparse import uses_relative, uses_netloc, uses_params, urljoin
from urllib2 import URLError
from httplib import HTTPException
from contextlib import contextmanager, closing
from functools import wraps
# @wraps(_urlopen)
@contextmanager
def urlopen(*args, **kwargs):
with closing(_urlopen(*args, **kwargs)) as f:
yield f
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
class PerformanceWarning(Warning):
pass
class DtypeWarning(Warning):
pass
def _is_url(url):
"""Check to see if a URL has a valid protocol.
Parameters
----------
url : str or unicode
Returns
-------
isurl : bool
If `url` has a valid protocol return True otherwise False.
"""
try:
return parse_url(url).scheme in _VALID_URLS
except:
return False
def _is_s3_url(url):
"""Check for an s3 url"""
try:
return parse_url(url).scheme == 's3'
except:
return False
def maybe_read_encoded_stream(reader, encoding=None):
"""read an encoded stream from the reader and transform the bytes to
unicode if required based on the encoding
Parameters
----------
reader : a streamable file-like object
encoding : optional, the encoding to attempt to read
Returns
-------
a tuple of (a stream of decoded bytes, the encoding which was used)
"""
if compat.PY3 or encoding is not None: # pragma: no cover
if encoding:
errors = 'strict'
else:
errors = 'replace'
encoding = 'utf-8'
reader = StringIO(reader.read().decode(encoding, errors))
else:
encoding = None
return reader, encoding
def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
"""
If the filepath_or_buffer is a url, translate and return the buffer
passthru otherwise.
Parameters
----------
filepath_or_buffer : a url, filepath, or buffer
encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
Returns
-------
a filepath_or_buffer, the encoding
"""
if _is_url(filepath_or_buffer):
req = _urlopen(str(filepath_or_buffer))
return maybe_read_encoded_stream(req, encoding)
if _is_s3_url(filepath_or_buffer):
try:
import boto
except:
raise ImportError("boto is required to handle s3 files")
# Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
# are environment variables
parsed_url = parse_url(filepath_or_buffer)
conn = boto.connect_s3()
b = conn.get_bucket(parsed_url.netloc)
k = boto.s3.key.Key(b)
k.key = parsed_url.path
filepath_or_buffer = StringIO(k.get_contents_as_string())
return filepath_or_buffer, None
return filepath_or_buffer, None
def file_path_to_url(path):
"""
converts an absolute native path to a FILE URL.
Parameters
----------
path : a path in native format
Returns
-------
a valid FILE URL
"""
return urljoin('file:', pathname2url(path))
# ZipFile is not a context manager for <= 2.6
# must be tuple index here since 2.6 doesn't use namedtuple for version_info
if sys.version_info[1] <= 6:
@contextmanager
def ZipFile(*args, **kwargs):
with closing(zipfile.ZipFile(*args, **kwargs)) as zf:
yield zf
else:
ZipFile = zipfile.ZipFile
| gpl-3.0 |
galactics/beyond | tests/propagators/test_cw.py | 2 | 7542 | from pytest import fixture, mark, xfail
import numpy as np
from beyond.orbits import Orbit, Ephem
from beyond.dates import Date, timedelta
from beyond.propagators.cw import ClohessyWiltshire
from beyond.orbits.man import ImpulsiveMan, ContinuousMan
from beyond.frames.frames import HillFrame
def plot_ephem(ephem):
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
dates = list(ephem.dates)
ax = plt.subplot(111)
plt.plot(dates, ephem[:, 0], label=ephem[0].propagator.frame.orientation[0])
plt.plot(dates, ephem[:, 1], label=ephem[0].propagator.frame.orientation[1])
plt.plot(dates, ephem[:, 2], label=ephem[0].propagator.frame.orientation[2])
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))
plt.legend()
plt.show()
@fixture(params=["QSW", "TNW"])
def propagator(request):
hill = HillFrame(orientation=request.param)
return ClohessyWiltshire(6800000.0, frame=hill)
@fixture
def lower_circular(propagator):
"""Define an orbit 600 m below the target, and 1500 behind
"""
return Orbit(
propagator._mat6 @ [-600, -1500, 0, 0, 1.5 * propagator.n * 600, 0],
Date(2020, 5, 24),
"cartesian",
"Hill",
propagator,
)
@fixture
def tangential(propagator):
return Orbit(
propagator._mat6 @ [0, -100, 0, 0, 0, 0],
Date(2020, 5, 24),
"cartesian",
"Hill",
propagator,
)
def test_circular_lower(lower_circular):
"""This test checks if the stability of a circular lower orbit
"""
orb = lower_circular.propagate(timedelta(minutes=5))
radial = 0 if lower_circular.propagator.frame.orientation == "QSW" else 1
tan = 1 if lower_circular.propagator.frame.orientation == "QSW" else 0
assert np.isclose(orb[radial], lower_circular[radial])
assert orb[tan] > lower_circular[tan]
assert orb[2] == lower_circular[2]
def test_stable_tangential(tangential):
orb = tangential.propagate(timedelta(minutes=5))
radial = 0 if tangential.propagator.frame.orientation == "QSW" else 1
tan = 1 if tangential.propagator.frame.orientation == "QSW" else 0
assert orb[radial] == tangential[radial]
assert orb[tan] == tangential[tan]
assert orb[2] == tangential[2]
@mark.parametrize("kind", ["impulive", "continuous"])
def test_man_hohmann(kind, lower_circular):
"""Check stability of a Hohmann transfer to nullified radial distance
i.e. if the tangential distance does not evolve
"""
orb = lower_circular
man_start = orb.date + timedelta(seconds=60)
if kind == "impulive":
man_stop = man_start + timedelta(seconds=np.pi / orb.propagator.n)
else:
man_stop = timedelta(seconds=2 * np.pi / orb.propagator.n)
delta_a = 600
dv = 1.5 * orb.propagator.n * delta_a / 6
if kind == "impulive":
orb.maneuvers = [
ImpulsiveMan(man_start, orb.propagator._mat3 @ [0, dv, 0]),
ImpulsiveMan(man_stop, orb.propagator._mat3 @ [0, dv, 0]),
]
else:
orb.maneuvers = [
ContinuousMan(man_start, man_stop, dv=2 * orb.propagator._mat3 @ [0, dv, 0]),
]
# ephem = orb.ephem(stop=man_stop + timedelta(hours=1), step=timedelta(seconds=60))
# plot_ephem(ephem)
orb2 = orb.propagate(man_stop + timedelta(seconds=60))
orb3 = orb.propagate(man_stop + timedelta(seconds=120))
radial = 0 if orb.propagator.frame.orientation == "QSW" else 1
tan = 1 if orb.propagator.frame.orientation == "QSW" else 0
assert np.isclose(orb2[radial], orb3[radial])
assert np.isclose(orb2[tan], orb3[tan])
assert orb2[2] == orb3[2]
# Propagate during the continuous maneuver
if kind == "continuous":
orb4 = orb.propagate(man_start + timedelta(minutes=25))
assert orb.maneuvers[0].check(orb4.date)
if orb.propagator.frame.orientation == "QSW":
assert - delta_a < orb4[radial] < 0
else:
assert delta_a > orb4[radial] > 0
@mark.parametrize("kind", ["impulive", "continuous"])
def test_man_eccentric_boost(kind, tangential):
orb = tangential
man_start = orb.date + timedelta(seconds=60)
if kind == "impulive":
man_stop = man_start + timedelta(seconds=np.pi / orb.propagator.n)
else:
man_stop = timedelta(seconds=2 * np.pi / orb.propagator.n)
forward = 70 # Advance 70 m closer to the target
dv = forward * orb.propagator.n / 4
if kind == "impulive":
orb.maneuvers = [
ImpulsiveMan(man_start, orb.propagator._mat3 @ [-dv, 0, 0]),
ImpulsiveMan(man_stop, orb.propagator._mat3 @ [-dv, 0, 0]),
]
else:
orb.maneuvers = ContinuousMan(man_start, man_stop, dv=2*orb.propagator._mat3 @ [-dv, 0, 0])
# ephem = orb.ephem(stop=man_stop + timedelta(hours=1), step=timedelta(seconds=60))
# plot_ephem(ephem)
orb2 = orb.propagate(man_stop + timedelta(seconds=120))
radial = 0 if orb.propagator.frame.orientation == "QSW" else 1
tan = 1 if orb.propagator.frame.orientation == "QSW" else 0
assert np.isclose(orb[radial], orb2[radial])
assert np.isclose(orb2[tan] - orb[tan], forward)
assert np.isclose(orb2[tan], -30)
assert orb[2] == orb2[2]
def test_man_tangential_boost(tangential):
orb = tangential
man_start = orb.date + timedelta(seconds=60)
man_stop = man_start + timedelta(seconds=2 * np.pi / orb.propagator.n)
forward = 70 # Advance 70 m closer to the target
dv = forward * orb.propagator.n / (6 * np.pi)
orb.maneuvers = [
ImpulsiveMan(man_start, orb.propagator._mat3 @ [0, -dv, 0]),
ImpulsiveMan(man_stop, orb.propagator._mat3 @ [0, dv, 0]),
]
# ephem = orb.ephem(stop=man_stop + timedelta(hours=1), step=timedelta(seconds=60))
# plot_ephem(ephem)
orb2 = orb.propagate(man_stop + timedelta(seconds=120))
radial = 0 if orb.propagator.frame.orientation == "QSW" else 1
tan = 1 if orb.propagator.frame.orientation == "QSW" else 0
assert np.isclose(orb[radial], orb2[radial])
assert np.isclose(orb2[tan] - orb[tan], forward)
assert np.isclose(orb2[tan], -30)
assert orb[2] == orb2[2]
def test_man_tangential_linear(tangential):
orb = tangential
forward = 60
dv = 0.1
duration = timedelta(seconds=abs(forward / dv))
dv1 = orb.propagator._mat3 @ [0, dv, 0]
accel = (orb.propagator._mat3 @ [-1, 0, 0]) * 2 * orb.propagator.n * dv
man_start = orb.date + timedelta(seconds=10)
duration = timedelta(seconds=forward / dv)
man_stop = man_start + duration
orb.maneuvers = [
ImpulsiveMan(man_start, dv1),
ContinuousMan(man_start, duration, accel=accel),
ImpulsiveMan(man_stop, -dv1),
]
# ephem = orb.ephem(stop=man_stop + timedelta(hours=1), step=timedelta(seconds=60))
# plot_ephem(ephem)
orb2 = orb.propagate(man_stop + timedelta(seconds=120))
radial = 0 if orb.propagator.frame.orientation == "QSW" else 1
tan = 1 if orb.propagator.frame.orientation == "QSW" else 0
assert duration.total_seconds() == 600
assert np.isclose(orb2[radial], 0)
assert np.isclose(orb2[tan] - orb[tan], forward)
assert orb2[2] == orb[2]
def test_from_orbit(lower_circular, orbit):
if isinstance(orbit, Ephem):
xfail("Ephem are not handled by ClohessyWiltshire propagator")
propagator = ClohessyWiltshire.from_orbit(orbit, lower_circular.propagator.frame.orientation)
lower_circular.propagator = propagator
| mit |
amolkahat/pandas | pandas/core/frame.py | 1 | 293955 | # pylint: disable=E1101
# pylint: disable=W0212,W0703,W0622
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
import collections
import functools
import itertools
import sys
import warnings
from textwrap import dedent
import numpy as np
import numpy.ma as ma
from pandas._libs import lib, algos as libalgos
from pandas.util._decorators import (Appender, Substitution,
rewrite_axis_style_signature,
deprecate_kwarg)
from pandas.util._validators import (validate_bool_kwarg,
validate_axis_style_args)
from pandas import compat
from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, PY36, raise_with_traceback,
string_and_binary_types)
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.cast import (
maybe_upcast,
cast_scalar_to_array,
construct_1d_arraylike_from_scalar,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_infer_to_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
invalidate_string_dtypes,
coerce_to_dtypes,
maybe_upcast_putmask,
find_common_type)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_object_dtype,
is_extension_type,
is_extension_array_dtype,
is_datetimetz,
is_datetime64_any_dtype,
is_bool_dtype,
is_integer_dtype,
is_float_dtype,
is_integer,
is_scalar,
is_dtype_equal,
needs_i8_conversion,
_get_dtype_from_object,
ensure_float64,
ensure_int64,
ensure_platform_int,
is_list_like,
is_nested_list_like,
is_iterator,
is_sequence,
is_named_tuple)
from pandas.core.dtypes.concat import _get_sliced_frame_result_type
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass, ABCMultiIndex
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms
from pandas.core import common as com
from pandas.core import nanops
from pandas.core import ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.config import get_option
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, ensure_index,
ensure_index_from_sequences)
from pandas.core.indexes import base as ibase
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.io.formats import console
from pandas.io.formats import format as fmt
from pandas.io.formats.printing import pprint_thing
import pandas.plotting._core as gfx
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes='index, columns', klass='DataFrame',
axes_single_arg="{0 or 'index', 1 or 'columns'}",
axis="""axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels
.. versionchanged:: 0.23.0
Allow specifying index or column level names.""",
versionadded_to_excel='',
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
side, respectively. To raise an exception on overlapping columns use
(False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
See Also
--------
merge_ordered : merge with optional filling/interpolation.
merge_asof : merge on nearest keys.
DataFrame.join : similar method using indices.
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
_deprecations = NDFrame._deprecations | frozenset(
['sortlevel', 'get_value', 'set_value', 'from_csv', 'from_items'])
_accessors = set()
@property
def _constructor_expanddim(self):
from pandas.core.panel import Panel
return Panel
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None) is not None:
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif (isinstance(data, compat.Iterable)
and not isinstance(data, string_and_binary_types)):
if not isinstance(data, compat.Sequence):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_dict({}, index, columns, dtype=dtype)
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: {e}'.format(e=e))
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array((len(index), len(columns)),
data, dtype=dtype)
mgr = self._init_ndarray(values, index, columns,
dtype=values.dtype, copy=False)
else:
raise ValueError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
arrays = Series(data, index=columns, dtype=object)
data_names = arrays.index
missing = arrays.isnull()
if index is None:
# GH10856
# raise ValueError if only scalars in dict
index = extract_index(arrays[~missing])
else:
index = ensure_index(index)
# no obvious "empty" int column
if missing.any() and not is_integer_dtype(dtype):
if dtype is None or np.issubdtype(dtype, np.flexible):
# 1783
nan_dtype = object
else:
nan_dtype = dtype
v = construct_1d_arraylike_from_scalar(np.nan, len(index),
nan_dtype)
arrays.loc[missing] = [v] * missing.sum()
else:
keys = com.dict_keys_to_ordered_list(data)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if (is_categorical_dtype(getattr(values, 'dtype', None)) or
is_categorical_dtype(dtype)):
if not hasattr(values, 'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1)
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
elif (is_datetimetz(values) or is_extension_array_dtype(values)):
# GH19157
if columns is None:
columns = [0]
return _arrays_to_mgr([values], columns, index, columns,
dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '{dtype}' (Exception "
"was: {orig})".format(dtype=dtype,
orig=orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = maybe_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
        [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
        dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self):
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._data.any_extension_types:
return len({block.dtype for block in self._data.blocks}) == 1
else:
return not self._data.is_mixed_type
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns. In case of a non-interactive session,
        no boundaries apply.
        ignore_width is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if (get_option('display.width') is not None or
console.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
        if max_rows is not None:  # rows are limited, so truncate the frame
# min of two, where one may be None
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(l) for l in value.split('\n'))
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (self._repr_fits_horizontal_() and
self._repr_fits_vertical_())
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for
# IPython 2.x is no longer needed.
if console.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'<', 1)
val = val.replace('>', r'>', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None
@property
def style(self):
"""
Property returning a Styler object containing methods for
        building a styled HTML representation of the DataFrame.
See Also
--------
pandas.io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
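    # Editor's note -- a minimal usage sketch, not part of the original
    # source.  The Styler returned by ``.style`` chains formatting methods
    # and renders to an HTML string:
    #
    # >>> df = pd.DataFrame({'A': [1.0, 2.0], 'B': [3.0, 4.0]})
    # >>> html = df.style.highlight_max().render()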
def iteritems(self):
r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
See also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
        ------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
Notes
-----
The column names will be renamed to positional names if they are
        invalid Python identifiers, are repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = []
if index:
arrays.append(self.index)
fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor, and
# things get slow with this many fields in Python 2
if name is not None and len(self.columns) + index < 256:
# `rename` is unsupported in Python 2.6
try:
itertuple = collections.namedtuple(name,
fields + list(self.columns),
rename=True)
return map(itertuple._make, zip(*arrays))
except Exception:
pass
# fallback to regular tuples
return zip(*arrays)
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects. Can also be
called using `self @ other` in Python >= 3.5.
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, '
'{s} vs {r}'.format(s=lvals.shape,
r=rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: {oth}'.format(oth=type(other)))
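    # Editor's note -- an illustrative example for ``dot``, not part of the
    # original source.  The caller's columns are aligned with the index of
    # ``other`` before multiplying:
    #
    # >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
    # >>> s = pd.Series([1, 1, 2, 1])
    # >>> df.dot(s)
    # 0   -4
    # 1    5
    # dtype: int64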
def __matmul__(self, other):
""" Matrix multiplication using binary `@` operator in Python>=3.5 """
return self.dot(other)
def __rmatmul__(self, other):
""" Matrix multiplication using binary `@` operator in Python>=3.5 """
return self.T.dot(np.transpose(other)).T
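    # Editor's note, not part of the original source: under Python >= 3.5 the
    # same product can be spelled with the ``@`` operator, e.g. ``df @ s``
    # (``__matmul__``) or ``np.array(...) @ df`` (``__rmatmul__``).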
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None, columns=None):
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
pandas.DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame
DataFrame : DataFrame object creation using constructor
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == 'columns':
if columns is not None:
raise ValueError("cannot use columns parameter with "
"orient='columns'")
else: # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
dict, list or collections.Mapping
Return a collections.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1.0, 0.5], [2.0, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1.0, 'col2': 0.5}, {'col1': 2.0, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1.0, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2.0, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning,
stacklevel=2)
# GH16122
into_c = com.standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return into_c((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', lib.map_infer(self.values.ravel(),
com.maybe_box_datetimelike)
.reshape(self.values.shape).tolist())))
elif orient.lower().startswith('s'):
return into_c((k, com.maybe_box_datetimelike(v))
for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [into_c((k, com.maybe_box_datetimelike(v))
for k, v in zip(self.columns, np.atleast_1d(row)))
for row in self.values]
elif orient.lower().startswith('i'):
if not self.index.is_unique:
raise ValueError(
"DataFrame index must be unique for orient='index'."
)
return into_c((t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples())
else:
raise ValueError("orient '{o}' not understood".format(o=orient))
def to_gbq(self, destination_table, project_id=None, chunksize=None,
reauth=False, if_exists='fail', private_key=None,
auth_local_webserver=False, table_schema=None, location=None,
progress_bar=True, verbose=None):
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
private_key : str, optional
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
            List of BigQuery table fields to which the DataFrame columns
            conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
verbose : bool, deprecated
Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
pandas.read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
return gbq.to_gbq(
self, destination_table, project_id=project_id,
chunksize=chunksize, reauth=reauth,
if_exists=if_exists, private_key=private_key,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema, location=location,
progress_bar=progress_bar, verbose=verbose)
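    # Editor's note -- a hedged usage sketch, not part of the original
    # source; the project and table names below are placeholders and the
    # call requires the pandas-gbq package:
    #
    # >>> df.to_gbq('my_dataset.my_table', project_id='my-project',
    # ...           if_exists='append')  # doctest: +SKIP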
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
index_data = [arrays[i] for i in to_remove]
result_index = ensure_index_from_sequences(index_data,
names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
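    # Editor's note -- an illustrative example for ``from_records``, not part
    # of the original source, using a structured ndarray:
    #
    # >>> data = np.array([(3, 'a'), (2, 'b')],
    # ...                 dtype=[('col_1', 'i4'), ('col_2', 'U1')])
    # >>> pd.DataFrame.from_records(data)
    #    col_1 col_2
    # 0      3     a
    # 1      2     b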
def to_records(self, index=True, convert_datetime64=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
.. deprecated:: 0.23.0
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: convert structured or record ndarray
to DataFrame.
numpy.recarray: ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
"""
if convert_datetime64 is not None:
warnings.warn("The 'convert_datetime64' parameter is "
"deprecated and will be removed in a future "
"version",
FutureWarning, stacklevel=2)
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = (lmap(compat.text_type, index_names) +
lmap(compat.text_type, self.columns))
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(compat.text_type, self.columns)
formats = [v.dtype for v in arrays]
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""Construct a dataframe from a list of tuples
.. deprecated:: 0.23.0
`from_items` is deprecated and will be removed in a future version.
Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>`
instead.
:meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
may be used to preserve the key order.
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
"""
warnings.warn("from_items is deprecated. Please use "
"DataFrame.from_dict(dict(items), ...) instead. "
"DataFrame.from_dict(OrderedDict(items)) may be used to "
"preserve the key order.",
FutureWarning, stacklevel=2)
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = ensure_index(keys)
arrays = values
# GH 17312
# Provide more informative error msg when scalar values passed
try:
return cls._from_arrays(arrays, columns, None)
except ValueError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = ensure_index(keys)
# GH 17312
# Provide more informative error msg when scalar values passed
try:
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
except TypeError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=None,
infer_datetime_format=False):
"""Read CSV file.
.. deprecated:: 0.21.0
Use :func:`pandas.read_csv` instead.
It is preferable to use the more powerful :func:`pandas.read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`pandas.read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
            Write MultiIndex columns as a list of tuples (if True) or in
            the new (expanded) format (if False)
        infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
See also
--------
pandas.read_csv
Returns
-------
y : DataFrame
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
"instead. Note that some of the default arguments are "
"different, so please refer to the documentation "
"for from_csv when changing your function calls",
FutureWarning, stacklevel=2)
from pandas.io.parsers import read_csv
return read_csv(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame.
        Implement the sparse version of the DataFrame, meaning that any data
        matching a specific value is omitted in the representation.
The sparse DataFrame allows for a more efficient storage.
Parameters
----------
fill_value : float, default None
The specific value that should be omitted in the representation.
kind : {'block', 'integer'}, default 'block'
The kind of the SparseIndex tracking where data is not equal to
the fill value:
- 'block' tracks only the locations and sizes of blocks of data.
- 'integer' keeps an array with all the locations of the data.
In most cases 'block' is recommended, since it's more memory
efficient.
Returns
-------
SparseDataFrame
The sparse representation of the DataFrame.
See Also
--------
DataFrame.to_dense :
            Converts the DataFrame back to its dense form.
Examples
--------
>>> df = pd.DataFrame([(np.nan, np.nan),
... (1., np.nan),
... (np.nan, 1.)])
>>> df
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
>>> sdf = df.to_sparse()
>>> sdf
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(sdf)
<class 'pandas.core.sparse.frame.SparseDataFrame'>
"""
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
.. deprecated:: 0.20.0
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
"""
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sort_index(level=0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels,
minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return self._constructor_expanddim(new_mgr)
@Appender(_shared_docs['to_excel'] % _shared_doc_kwargs)
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
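    # Editor's note -- a minimal usage sketch, not part of the original
    # source; the file name is a placeholder and an Excel writer engine
    # (e.g. openpyxl or xlsxwriter) must be installed:
    #
    # >>> df.to_excel('output.xlsx', sheet_name='Sheet1')  # doctest: +SKIP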
@deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None, version=114,
convert_strl=None):
"""
Export Stata binary dta files.
Parameters
----------
fname : path (string), buffer or path object
string, path object (pathlib.Path or py._path.local.LocalPath) or
            object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
version : {114, 117}
            Version to use in the output dta file. Version 114 can be read
            by Stata 10 and later. Version 117 can be read by Stata 13
or later. Version 114 limits string variables to 244 characters or
fewer while 117 allows strings with lengths up to 2,000,000
characters.
.. versionadded:: 0.23.0
convert_strl : list, optional
            List of string column names to convert to the Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
                nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
See Also
--------
pandas.read_stata : Import Stata data files
pandas.io.stata.StataWriter : low-level writer for Stata data files
pandas.io.stata.StataWriter117 : low-level writer for version 117 files
Examples
--------
>>> data.to_stata('./data_file.dta')
Or with dates
>>> data.to_stata('./date_data_file.dta', {2 : 'tw'})
Alternatively you can create an instance of the StataWriter class
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
With dates:
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
kwargs = {}
if version not in (114, 117):
raise ValueError('Only formats 114 and 117 supported.')
if version == 114:
if convert_strl is not None:
raise ValueError('strl support is only available when using '
'format 117')
from pandas.io.stata import StataWriter as statawriter
else:
from pandas.io.stata import StataWriter117 as statawriter
kwargs['convert_strl'] = convert_strl
writer = statawriter(fname, self, convert_dates=convert_dates,
byteorder=byteorder, time_stamp=time_stamp,
data_label=data_label, write_index=write_index,
variable_labels=variable_labels, **kwargs)
writer.write_file()
def to_feather(self, fname):
"""
        Write out the binary Feather format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname)
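    # Editor's note -- a minimal usage sketch, not part of the original
    # source; requires the pyarrow backend and uses a placeholder path:
    #
    # >>> df.to_feather('df.feather')        # doctest: +SKIP
    # >>> pd.read_feather('df.feather')      # doctest: +SKIP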
def to_parquet(self, fname, engine='auto', compression='snappy',
index=None, **kwargs):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
fname : str
String file path.
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file. If ``None``,
the behavior depends on the chosen engine.
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip', compression='gzip')
>>> pd.read_parquet('df.parquet.gzip')
col1 col2
0 1 3
1 2 4
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
compression=compression, index=index, **kwargs)
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
'column names')
@Substitution(shared_params=fmt.common_docstring,
returns=fmt.return_docstring)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1' : [1, 2, 3], 'col2' : [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
@Substitution(header='whether to print column labels, default True')
@Substitution(shared_params=fmt.common_docstring,
returns=fmt.return_docstring)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, bold_rows=True,
classes=None, escape=True, max_rows=None, max_cols=None,
show_dimensions=False, notebook=False, decimal='.',
border=None, table_id=None):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe
.. versionadded:: 0.18.0
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if (justify is not None and
justify not in fmt._VALID_JUSTIFY_PARAMETERS):
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal, table_id=table_id)
        # TODO: a generic formatter would belong in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue()
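    # Editor's note -- an illustrative example, not part of the original
    # source.  When no buffer is supplied, ``to_html`` returns the table
    # markup as a string:
    #
    # >>> df = pd.DataFrame({'col1': [1, 2]})
    # >>> df.to_html(index=False).startswith('<table')
    # True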
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
            True always shows memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
            made based on column dtype and number of rows, assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
int_col 5 non-null int64
text_col 5 non-null object
float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
        Prints a summary of the column count and dtypes but not per-column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Pipe output of DataFrame.info to buffer instead of sys.stdout, get
        buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w", encoding="utf-8") as f:
... f.write(s)
260
        The `memory_usage` parameter allows deep introspection mode, especially
        useful for big DataFrames and for fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index._summary())
if len(self.columns) == 0:
lines.append('Empty {name}'.format(name=type(self).__name__))
fmt.buffer_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max(len(pprint_thing(k)) for k in self.columns) + 4
counts = None
tmpl = "{count}{dtype}"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
'Columns must equal counts '
'({cols:d} != {counts:d})'.format(
cols=len(cols), counts=len(counts)))
tmpl = "{count} non-null {dtype}"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl.format(count=count,
dtype=dtype))
def _non_verbose_repr():
lines.append(self.columns._summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return ("{num:3.1f}{size_q} "
"{x}".format(num=num, size_q=size_qualifier, x=x))
num /= 1024.0
return "{num:3.1f}{size_q} {pb}".format(num=num,
size_q=size_qualifier,
pb='PB')
if verbose:
_verbose_repr()
        elif verbose is False:  # explicitly set to False, as opposed to None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k
in sorted(compat.iteritems(counts))]
lines.append('dtypes: {types}'.format(types=', '.join(dtypes)))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: {mem}\n".format(
mem=_sizeof_fmt(mem_usage, size_qualifier)))
fmt.buffer_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
            index in returned Series. If ``index=True``, the memory usage of
            the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
sizes : Series
A Series whose index is the original column names and whose values
            are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
pandas.Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 (1+0j) 1 True
1 1 1.0 (1+0j) 1 True
2 1 1.0 (1+0j) 1 True
3 1 1.0 (1+0j) 1 True
4 1 1.0 (1+0j) 1 True
>>> df.memory_usage()
Index 80
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default;
pass ``deep=True`` to include it:
>>> df.memory_usage(deep=True)
Index 80
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5168
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result
def transpose(self, *args, **kwargs):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
copy : bool, default False
If True, the underlying data is copied. Otherwise (default), no
copy is made if possible.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
return super(DataFrame, self).transpose(1, 0, **kwargs)
T = property(transpose)
# ----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = com._unpickle_array(cols)
index = com._unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
# old unpickling
(vals, idx, cols), object_state = state
index = com._unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=com._unpickle_array(ocols), copy=False)
dm = dm.join(objects)
self._data = dm._data
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable:
series = self._iget_item_cache(col)
return com.maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except (TypeError, ValueError):
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""Put single value at passed column and index
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
try:
if takeable is True:
series = self._iget_item_cache(col)
return series._set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
_set_value.__doc__ = set_value.__doc__
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._box_col_values(values, label)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = com.apply_if_callable(key, self)
# shortcut if the key is in columns
try:
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
except (TypeError, ValueError):
# The TypeError correctly catches non hashable "key" (e.g. list)
# The ValueError can be removed once GH #21729 is fixed
pass
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self._getitem_frame(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._convert_to_indexer(key, axis=1,
raise_missing=True)
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
data = data[key]
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take(indexer, axis=0)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(new_values, index=self.index,
columns=result_columns)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == '':
result = result['']
if isinstance(result, Series):
result = self._constructor_sliced(result,
index=self.index,
name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.size and not is_bool_dtype(key.values):
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, inplace=False, **kwargs):
"""Query the columns of a frame with a boolean expression.
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy
.. versionadded:: 0.18.0
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python;
however, the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
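A small deterministic sketch (the frame below is made up for
illustration only):
>>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 2, 1]})
>>> df.query('a > b')
   a  b
2  3  1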
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, compat.string_types):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0.
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
pandas.eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed, though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
msg = 'object of type {typ!r} has no info axis'
raise TypeError(msg.format(typ=type(obj).__name__))
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(_get_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=(include & exclude)))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(idx, dtype):
return idx, functools.partial(issubclass, dtype.type)
for idx, f in itertools.starmap(is_dtype_instance_mapper,
enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
include_these.iloc[idx] = any(map(f, include))
if exclude:
exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[_get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
klass = _get_sliced_frame_result_type(values, self)
return klass(values, index=self.index, name=items, fastpath=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError(
'Array conditional must be same shape as self'
)
key = self._constructor(key, **self._construct_axes_dict())
if key.values.size and not is_bool_dtype(key.values):
raise TypeError(
'Must pass DataFrame or 2-d ndarray with boolean values only'
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, that we can create one from the
passed value
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns)
column : string, number, or hashable object
label of the inserted column
value : int, Series, or array-like
allow_duplicates : bool, optional
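Examples
--------
A small illustrative sketch (column names and values are made up):
>>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df.insert(1, 'c', [5, 6])
>>> df
   a  c  b
0  1  5  3
1  2  6  4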
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
For Python 3.6 and above, later items in '\*\*kwargs' may refer to
newly created or modified columns in 'df'; items are computed and
assigned into 'df' in order. For Python 3.5 and below, the order of
keyword arguments is not specified, so you cannot refer to newly created
or modified columns. All items are computed first, and then assigned
in alphabetical order.
.. versionchanged :: 0.23.0
Keyword argument order is maintained for Python 3.6 and later.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
In Python 3.6+, you can create multiple columns within the same assign
where one of the columns depends on another one defined within the same
assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
# >= 3.6 preserve order of kwargs
if PY36:
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
else:
# <= 3.5: do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com.apply_if_callable(v, data)
# <= 3.5 and earlier
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
sanitized_column : numpy-array
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
from pandas.core.series import _sanitize_index
# Explicitly copy here, instead of in _sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = _sanitize_index(value, self.index, copy=False)
elif isinstance(value, Index) or is_sequence(value):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(
value, pandas_dtype=True)
# upcast
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_type(value) or is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
Returns
-------
values : ndarray
    The found values
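Examples
--------
A minimal illustrative sketch (the frame and labels are made up):
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])
>>> df.lookup(['x', 'y'], ['A', 'B'])
array([1, 4])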
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, method, copy, level,
fill_value, limit, tolerance)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit, tolerance)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=np.nan,
limit=None, tolerance=None):
new_index, indexer = self.index.reindex(new_index, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, method, copy, level,
fill_value=None, limit=None, tolerance=None):
new_columns, indexer = self.columns.reindex(new_columns, method=method,
level=level, limit=limit,
tolerance=tolerance)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
return super(DataFrame, self).align(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature('labels', [('method', None),
('copy', True),
('level', None),
('fill_value', np.nan),
('limit', None),
('tolerance', None)])
def reindex(self, *args, **kwargs):
axes = validate_axis_style_args(self, args, kwargs, 'labels',
'reindex')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('labels', None)
return super(DataFrame, self).reindex(**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame,
self).reindex_axis(labels=labels, axis=axis,
method=method, level=level, copy=copy,
limit=limit, fill_value=fill_value)
def drop(self, labels=None, axis=0, index=None, columns=None,
level=None, inplace=False, errors='raise'):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index, columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
dropped : pandas.DataFrame
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns
Series.drop : Return Series with specified index labels removed.
Raises
------
KeyError
If none of the labels are found in the selected axis
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3,4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... labels=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3,0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super(DataFrame, self).drop(labels=labels, axis=axis,
index=index, columns=columns,
level=level, inplace=inplace,
errors=errors)
@rewrite_axis_style_signature('mapper', [('copy', True),
('inplace', False),
('level', None)])
def rename(self, *args, **kwargs):
"""Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper, index, columns : dict-like or function, optional
dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
axis : int or str, optional
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : DataFrame
See Also
--------
pandas.DataFrame.rename_axis
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('mapper', None)
return super(DataFrame, self).rename(**kwargs)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.fillna.__doc__)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(DataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
@Appender(_shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad'):
return super(DataFrame, self).replace(to_replace=to_replace,
value=value, inplace=inplace,
limit=limit, regex=regex,
method=method)
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
return super(DataFrame, self).shift(periods=periods, freq=freq,
axis=axis)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale':[55, 40, 84, 31]})
>>> df
   month  sale  year
0      1    55  2012
1      4    40  2014
2      7    84  2013
3     10    31  2014
Set the index to become the 'month' column:
>>> df.set_index('month')
sale year
month
1 55 2012
4 40 2014
7 84 2013
10 31 2014
Create a multi-index using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a multi-index using a set of values and a column:
>>> df.set_index([[1, 2, 3, 4], 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Returns
-------
dataframe : DataFrame
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
missing = []
for col in keys:
if (is_scalar(col) or isinstance(col, tuple)) and col in self:
# tuples can be either column keys or list-likes
# if they are valid column keys, everything is fine
continue
elif is_scalar(col) and col not in self:
# tuples that are not column keys are considered list-like,
# not considered missing
missing.append(col)
elif (not is_list_like(col, allow_sets=False)
or getattr(col, 'ndim', 1) > 1):
raise TypeError('The parameter "keys" may only contain a '
'combination of valid column keys and '
'one-dimensional list-likes')
if missing:
raise KeyError('{}'.format(missing))
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, ABCMultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, ABCMultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (ABCIndexClass, ABCSeries)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif (is_list_like(col)
and not (isinstance(col, tuple) and col in self)):
# all other list-likes (but avoid valid column keys)
col = list(col)  # ensure iterators do not get read twice, etc.
arrays.append(col)
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError('Index has duplicate keys: {dup}'.format(
dup=duplicates))
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
reset : DataFrame
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return super(DataFrame, self).isna()
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return super(DataFrame, self).isnull()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return super(DataFrame, self).notna()
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return super(DataFrame, self).notnull()
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing value.
.. deprecated:: 0.23.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
# GH20987
msg = ("supplying multiple axes to axis is deprecated and "
"will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
result = self._take(mask.nonzero()[0], axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
deduplicated : DataFrame
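Examples
--------
An illustrative sketch using made-up data:
>>> df = pd.DataFrame({'a': [1, 1, 2], 'b': [3, 3, 4]})
>>> df.drop_duplicates()
   a  b
0  1  3
2  2  4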
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
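Examples
--------
A minimal sketch with made-up data:
>>> df = pd.DataFrame({'a': [1, 1, 2]})
>>> df.duplicated()
0    False
1     True
2    False
dtype: bool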
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return Series()
def f(vals):
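# factorize one column of values: integer codes per row plus the
# array of uniques; len(shape) below is the number of distinct values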
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, compat.string_types) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by (%d)' %
(len(ascending), len(by)))
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = []
for x in by:
k = self._get_label_or_level_values(x, axis=axis)
keys.append(k)
indexer = lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
new_data = self._data.take(indexer,
axis=self._get_block_manager_axis(axis),
verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_index.__doc__)
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, 'inplace')
# 10726
if by is not None:
warnings.warn("by argument to sort_index is deprecated, "
"please use .sort_values(by=...)",
FutureWarning, stacklevel=2)
if level is not None:
raise ValueError("unable to simultaneously sort by and level")
return self.sort_values(by, axis=axis, ascending=ascending,
inplace=inplace)
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
if level is not None:
new_axis, indexer = labels.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
elif isinstance(labels, MultiIndex):
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer(labels._get_labels_for_sorting(),
orders=ascending,
na_position=na_position)
else:
from pandas.core.sorting import nargsort
            # Check monotonicity before sorting the index
# GH11080
if ((ascending and labels.is_monotonic_increasing) or
(not ascending and labels.is_monotonic_decreasing)):
if inplace:
return
else:
return self.copy()
indexer = nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer,
axis=baxis,
verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
"""Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order).
.. deprecated:: 0.20.0
Use :meth:`DataFrame.sort_index`
Parameters
----------
level : int
axis : {0 or 'index', 1 or 'columns'}, default 0
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
See Also
--------
DataFrame.sort_index(level=...)
"""
warnings.warn("sortlevel is deprecated, use sort_index(level= ...)",
FutureWarning, stacklevel=2)
return self.sort_index(level=level, axis=axis, ascending=ascending,
inplace=inplace, sort_remaining=sort_remaining)
def nlargest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
            - ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, 8, 2],
... 'b': list('abdcef'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0, 9.0]})
>>> df
a b c
0 1 a 1.0
1 10 b 2.0
2 8 d NaN
3 11 c 3.0
4 8 e 4.0
5 2 f 9.0
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "a".
>>> df.nlargest(3, 'a')
a b c
3 11 c 3.0
1 10 b 2.0
2 8 d NaN
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'a', keep='last')
a b c
3 11 c 3.0
1 10 b 2.0
4 8 e 4.0
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'a', keep='all')
a b c
3 11 c 3.0
1 10 b 2.0
2 8 d NaN
4 8 e 4.0
To order by the largest values in column "a" and then "c", we can
specify multiple columns like in the next example.
>>> df.nlargest(3, ['a', 'c'])
a b c
4 8 e 4.0
3 11 c 3.0
1 10 b 2.0
Attempting to use ``nlargest`` on non-numeric dtypes will raise a
``TypeError``:
>>> df.nlargest(3, 'b')
Traceback (most recent call last):
TypeError: Column 'b' has dtype object, cannot use method 'nlargest'
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest()
def nsmallest(self, n, columns, keep='first'):
"""Get the rows of a DataFrame sorted by the `n` smallest
values of `columns`.
Parameters
----------
n : int
Number of items to retrieve
columns : list or str
Column name or names to order by
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
            - ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
Examples
--------
>>> df = pd.DataFrame({'a': [1, 10, 8, 11, 8, 2],
... 'b': list('abdcef'),
... 'c': [1.0, 2.0, np.nan, 3.0, 4.0, 9.0]})
>>> df
a b c
0 1 a 1.0
1 10 b 2.0
2 8 d NaN
3 11 c 3.0
4 8 e 4.0
5 2 f 9.0
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "a".
>>> df.nsmallest(3, 'a')
a b c
0 1 a 1.0
5 2 f 9.0
2 8 d NaN
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'a', keep='last')
a b c
0 1 a 1.0
5 2 f 9.0
4 8 e 4.0
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'a', keep='all')
a b c
0 1 a 1.0
5 2 f 9.0
2 8 d NaN
4 8 e 4.0
To order by the largest values in column "a" and then "c", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['a', 'c'])
a b c
0 1 a 1.0
5 2 f 9.0
4 8 e 4.0
Attempting to use ``nsmallest`` on non-numeric dtypes will raise a
``TypeError``:
>>> df.nsmallest(3, 'b')
Traceback (most recent call last):
TypeError: Column 'b' has dtype object, cannot use method 'nsmallest'
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
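        Examples
        --------
        A minimal illustrative sketch (hypothetical data, not part of the
        original docstring):
        >>> mi = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)])
        >>> df = pd.DataFrame({'x': [10, 20]}, index=mi)
        >>> df.swaplevel().index.tolist()
        [(1, 'a'), (2, 'b')]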
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
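        Examples
        --------
        A minimal illustrative sketch (hypothetical data, not part of the
        original docstring):
        >>> mi = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)],
        ...                                names=['letter', 'number'])
        >>> df = pd.DataFrame({'x': [10, 20]}, index=mi)
        >>> df.reorder_levels(['number', 'letter']).index.tolist()
        [(1, 'a'), (2, 'b')]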
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
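        # Align both frames on the union of their indexes/columns, then apply
        # ``func`` either directly on the underlying values or, when series
        # dispatch is needed (e.g. mixed dtypes), column by column via
        # ``ops.dispatch_to_series``.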
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
if ops.should_series_dispatch(this, other, func):
# iterate over columns
return ops.dispatch_to_series(this, other, _arith_op)
else:
result = _arith_op(this.values, other.values)
return self._constructor(result,
index=new_index, columns=new_columns,
copy=False)
def _combine_match_index(self, other, func, level=None):
left, right = self.align(other, join='outer', axis=0, level=level,
copy=False)
assert left.index.equals(right.index)
if left._is_mixed_type or right._is_mixed_type:
# operate column-wise; avoid costly object-casting in `.values`
return ops.dispatch_to_series(left, right, func)
else:
# fastpath --> operate directly on values
with np.errstate(all="ignore"):
new_data = func(left.values.T, right.values).T
return self._constructor(new_data,
index=left.index, columns=self.columns,
copy=False)
def _combine_match_columns(self, other, func, level=None):
assert isinstance(other, Series)
left, right = self.align(other, join='outer', axis=1, level=level,
copy=False)
assert left.columns.equals(right.index)
return ops.dispatch_to_series(left, right, func, axis="columns")
def _combine_const(self, other, func, errors='raise'):
assert lib.is_scalar(other) or np.ndim(other) == 0
return ops.dispatch_to_series(self, other, func)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
Perform column-wise combine with another DataFrame based on a
passed function.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
scalar. Used to merge the two dataframes column by columns.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : boolean, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
result : DataFrame
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 NaN
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1],}, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1],}, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
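        # Combine column by column over the union of columns: align the two
        # Series, optionally fill missing values, reconcile dtypes, then
        # apply ``func`` to the pair.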
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
            # don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
                # If self does not have this column, the aligned series is
                # all NaN; try to promote it to other's dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns)
def combine_first(self, other):
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
combined : DataFrame
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
if needs_i8_conversion(arr):
# TODO(DatetimelikeArray): just use .asi8
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view('i8')
return arr
def combiner(x, y):
mask = isna(x)
if isinstance(mask, (ABCIndexClass, ABCSeries)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> boolean 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
raise_conflict : bool, default False
If True, will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
Raises
------
ValueError
When `raise_conflict` is True and there's overlapping non-NA data.
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
        For a Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if raise_conflict:
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
            # don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
_shared_docs['pivot'] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : string or object, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string or object
Column to use to make new frame's columns.
values : string, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
.. versionchanged :: 0.23.0
Also accept list of column names.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
        values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : pivot based on the index values instead of a
column.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution('')
@Appender(_shared_docs['pivot'])
def pivot(self, index=None, columns=None, values=None):
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs['pivot_table'] = """
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
        it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
        it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions
fill_value : scalar, default None
Value to replace missing values with
margins : boolean, default False
Add all row / columns (e.g. for subtotal / grand totals)
dropna : boolean, default True
Do not include columns whose entries are all NaN
margins_name : string, default 'All'
Name of the row / column that will contain the totals
when margins is True.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7]})
>>> df
A B C D
0 foo one small 1
1 foo one large 2
2 foo one large 2
3 foo two small 3
4 foo two small 3
5 bar one large 4
6 bar one small 5
7 bar two small 6
8 bar two large 7
>>> table = pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
>>> table = pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max median min
A C
bar large 5.500000 16 14.5 13
small 5.500000 15 14.5 14
foo large 2.000000 10 9.5 9
small 2.333333 12 11.0 8
Returns
-------
table : DataFrame
See also
--------
DataFrame.pivot : pivot without aggregation that can handle
non-numeric data
"""
@Substitution('')
@Appender(_shared_docs['pivot_table'])
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
from pandas.core.reshape.pivot import pivot_table
return pivot_table(self, values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value,
margins=margins, dropna=dropna,
margins_name=margins_name)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being re-organised from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
        vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
_shared_docs['melt'] = ("""
"Unpivots" a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or string, optional
If columns are a MultiIndex then use this level to melt.
See also
--------
%(other)s
pivot_table
DataFrame.pivot
Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
""")
@Appender(_shared_docs['melt'] %
dict(caller='df.melt(',
versionadded='.. versionadded:: 0.20.0\n',
other='melt'))
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from pandas.core.reshape.melt import melt
return melt(self, id_vars=id_vars, value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is the element in the same column
of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
.. versionadded:: 0.16.1.
Returns
-------
diffed : DataFrame
See Also
--------
Series.diff: First discrete difference for a Series.
DataFrame.pct_change: Percent change over given number of periods.
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
Examples
--------
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0.0 0.0
1 NaN -1.0 3.0
2 NaN -1.0 7.0
3 NaN -1.0 13.0
4 NaN 0.0 20.0
5 NaN 2.0 28.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(self,
key, # type: Union[str, List[str]]
ndim, # type: int
subset=None # type: Union[Series, DataFrame, None]
):
# type: (...) -> Union[Series, DataFrame]
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_doc = dedent("""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to ``numpy.mean(arr_2d,
axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
See also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
pandas.core.groupby.GroupBy : Perform operations over groups.
pandas.core.resample.Resampler : Perform operations over resampled bins.
pandas.core.window.Rolling : Perform operations over rolling window.
pandas.core.window.Expanding : Perform operations over expanding window.
pandas.core.window.EWM : Perform operation over exponential weighted
window.
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
versionadded='.. versionadded:: 0.20.0',
**_shared_doc_kwargs))
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
result = None
try:
result, how = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
            # NDFrame.aggregate returns a tuple, and we need to transpose
            # only the result
result, how = (super(DataFrame, self.T)
._aggregate(arg, *args, **kwargs))
result = result.T if result is not None else result
return result, how
return super(DataFrame, self)._aggregate(arg, *args, **kwargs)
agg = aggregate
@Appender(_shared_docs['transform'] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
if axis == 1:
return super(DataFrame, self.T).transform(func, *args, **kwargs).T
return super(DataFrame, self).transform(func, *args, **kwargs)
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None,
result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
broadcast : bool, optional
Only relevant for aggregation functions:
* ``False`` or ``None`` : returns a Series whose length is the
length of the index or the number of columns (based on the
`axis` parameter)
* ``True`` : results will be broadcast to the original shape
of the frame, the original index and columns will be retained.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
raw : bool, default False
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
reduce : bool or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
`apply` will use `reduce` to determine whether the result
should be a Series or a DataFrame. If ``reduce=None`` (the
default), `apply`'s return value will be guessed by calling
`func` on an empty Series
(note: while guessing, exceptions raised by `func` will be
ignored).
If ``reduce=True`` a Series will always be returned, and if
``reduce=False`` a DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by ``result_type='reduce'``.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Notes
-----
In the current implementation apply calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
See also
--------
DataFrame.applymap: For elementwise operations
DataFrame.aggregate: only perform aggregating type operations
DataFrame.transform: only perform transforming type operations
Examples
--------
>>> df = pd.DataFrame([[4, 9],] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
        Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing result_type='expand' will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
Returns
-------
applied : Series or DataFrame
"""
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
broadcast=broadcast,
raw=raw,
reduce=reduce,
result_type=result_type,
args=args,
kwds=kwds)
return op.get_result()
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
See also
--------
DataFrame.apply : Apply a function along input axis of DataFrame
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False,
verify_integrity=False, sort=None):
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default None
Sort columns if the columns of `self` and `other` are not aligned.
The default sorting is deprecated and will change to not-sorting
in a future version of pandas. Explicitly pass ``sort=True`` to
silence the warning and sort. Explicitly pass ``sort=False`` to
silence the warning and not sort.
.. versionadded:: 0.23.0
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
        The following, while not recommended ways of generating a DataFrame,
        show two methods of building a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
        column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : name, tuple/list of names, or array-like
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}, default: 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use other frame's index
* outer: form union of calling frame's index (or column if on is
specified) with other frame's index, and sort it
lexicographically
* inner: form intersection of calling frame's index (or column if
on is specified) with other frame's index, preserving the order
              of the calling frame's index
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword)
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Support for specifying index levels as the `on` parameter was added
in version 0.23.0
Examples
--------
>>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> caller
A key
0 A0 K0
1 A1 K1
2 A2 K2
3 A3 K3
4 A4 K4
5 A5 K5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
B key
0 B0 K0
1 B1 K1
2 B2 K2
Join DataFrames using their indexes.
>>> caller.join(other, lsuffix='_caller', rsuffix='_other')
            A key_caller   B key_other
0 A0 K0 B0 K0
1 A1 K1 B1 K1
2 A2 K2 B2 K2
3 A3 K3 NaN NaN
4 A4 K4 NaN NaN
5 A5 K5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both caller and other. The joined DataFrame will have
key as its index.
>>> caller.set_index('key').join(other.set_index('key'))
              A    B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the on
parameter. DataFrame.join always uses other's index but we can use any
column in the caller. This method preserves the original caller's
index in the result.
>>> caller.join(other.set_index('key'), on='key')
            A key     B
0 A0 K0 B0
1 A1 K1 B1
2 A2 K2 B2
3 A3 K3 NaN
4 A4 K4 NaN
5 A5 K5 NaN
See also
--------
DataFrame.merge : For column(s)-on-columns(s) operations
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how, left_index=True,
right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
from pandas.core.reshape.merge import merge
return merge(self, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
copy=copy, indicator=indicator, validate=validate)
def round(self, decimals=0, *args, **kwargs):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Examples
--------
>>> df = pd.DataFrame(np.random.random([3, 3]),
... columns=['A', 'B', 'C'], index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = pd.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1 0.17
second 0.0 1 0.58
third 0.9 0 0.49
Returns
-------
DataFrame object
See Also
--------
numpy.around
Series.round
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.iteritems():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = [col for col in _dict_round(self, decimals)]
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals)
for _, v in self.iteritems()]
else:
raise TypeError("decimals must be an integer, a dict-like or a "
"Series")
if len(new_cols) > 0:
return self._constructor(concat(new_cols, axis=1),
index=self.index,
columns=self.columns)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
Examples
--------
>>> import numpy as np
>>> histogram_intersection = lambda a, b: np.minimum(a, b
... ).sum().round(decimals=1)
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == 'pearson':
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == 'spearman':
correl = libalgos.nancorr_spearman(ensure_float64(mat),
minp=min_periods)
elif method == 'kendall' or callable(method):
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
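            # Pairwise loop over columns: restrict each (i, j) pair to rows
            # where both values are finite, enforce ``min_periods``, and fill
            # both halves of the symmetric correlation matrix.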
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError("method must be either 'pearson', "
"'spearman', or 'kendall', '{method}' "
"was supplied".format(method=method))
return self._constructor(correl, index=idx, columns=cols)
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
pandas.Series.cov : compute covariance with another Series
pandas.core.window.EWM.cov: exponential weighted sample covariance
pandas.core.window.Expanding.cov : expanding sample covariance
pandas.core.window.Rolling.cov : rolling sample covariance
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
        because the estimated covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame, Series
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' to compute column-wise, 1 or 'columns' for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(other.corr, axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
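    # Hedged usage sketch (not part of the original source): corrwith aligns the
    # two objects on their shared labels, demeans both sides and divides by the
    # product of the standard deviations, so correlating a frame with itself
    # gives 1.0 for every column.
    # >>> df = pd.DataFrame({'a': [1., 2., 3.], 'b': [4., 5., 7.]})
    # >>> df.corrwith(df)
    # a    1.0
    # b    1.0
    # dtype: float64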
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each **row**.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : boolean, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: number of non-NA elements in a Series
DataFrame.shape: number of DataFrame rows and columns (including NA
elements)
DataFrame.isna: boolean same-sized DataFrame showing places of NA
elements
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._data.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical "
"{ax}.".format(ax=self._get_axis_name(axis)))
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notna(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if axis is None and filter_type == 'bool':
labels = None
constructor = None
else:
# TODO: Make other agg func handle axis=None properly
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
constructor = self._constructor
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
if (filter_type == 'bool' and is_object_dtype(values) and
axis is None):
# work around https://github.com/numpy/numpy/issues/10489
# TODO: combine with hasattr(result, 'dtype') further down
# hard since we don't have `values` down there.
result = np.bool_(result)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
# we only end up here if we have not specified
# numeric_only and yet we have tried a
# column-by-column reduction, where we have mixed type.
# So let's just do what we can
from pandas.core.apply import frame_apply
opa = frame_apply(self,
func=f,
result_type='expand',
ignore_failures=True)
result = opa.get_result()
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except Exception:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError(
"Handling exception with filter_type {f} not"
"implemented.".format(f=filter_type))
raise_with_traceback(e)
with np.errstate(all='ignore'):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type {f}"
"not supported.".format(f=filter_type))
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notna(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
if constructor is not None:
result = Series(result, index=labels)
return result
def nunique(self, axis=0, dropna=True):
"""
Return Series with number of distinct observations over requested
axis.
.. versionadded:: 0.20.0
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique : Series
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
>>> df.nunique(axis=1)
0 1
1 2
2 2
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
0 or 'index' for row-wise, 1 or 'columns' for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Raises
------
ValueError
* If the row/column is empty
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
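    # Hedged usage sketch (not part of the original source): idxmin and idxmax
    # return the index label of the first minimum/maximum in each column.
    # >>> df = pd.DataFrame({'x': [1, 3, 2]}, index=['a', 'b', 'c'])
    # >>> df.idxmin()
    # x    a
    # dtype: object
    # >>> df.idxmax()
    # x    b
    # dtype: object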
def _get_agg_axis(self, axis_num):
""" let's be explicit about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False, dropna=True):
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
        By default, missing values are not considered, and the modes of wings
are both 0 and 2. The second row of species and legs contains ``NaN``,
because they have only one mode, but the DataFrame has two rows.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
Setting ``dropna=False`` ``NaN`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1, 'index', 'columns'} (default 0)
0 or 'index' for row-wise, 1 or 'columns' for column-wise
numeric_only : boolean, default True
If False, the quantile of datetime and timedelta data will be
computed as well
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantiles : Series or DataFrame
- If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
- If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
'B': [pd.Timestamp('2010'),
pd.Timestamp('2011')],
'C': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
See Also
--------
pandas.core.window.Rolling.quantile
numpy.percentile
"""
self._check_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
result = data._data.quantile(qs=q,
axis=1,
interpolation=interpolation,
transposed=is_transposed)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format(
ax=axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
ts : TimeSeries with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got {ax!s}'.format(
ax=axis))
return self._constructor(new_data)
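    # Hedged usage sketch (not part of the original source): to_timestamp and
    # to_period convert the chosen axis between a PeriodIndex and a
    # DatetimeIndex; with the default how='start' the monthly period '2000-01'
    # maps to the timestamp 2000-01-01, and to_period('M') maps it back.
    # >>> df = pd.DataFrame({'v': [1, 2, 3]},
    # ...                   index=pd.period_range('2000-01', periods=3, freq='M'))
    # >>> df.to_timestamp().index[0].strftime('%Y-%m-%d')
    # '2000-01-01'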
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2],'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", gfx.FramePlotMethods)
hist = gfx.hist_frame
boxplot = gfx.boxplot_frame
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0},
docs={
'index': 'The index (row labels) of the DataFrame.',
'columns': 'The column labels of the DataFrame.'})
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [ensure_index(columns), index]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d' %
(lengths[0], len(index)))
raise ValueError(msg)
else:
index = ibase.default_index(lengths[0])
return ensure_index(index)
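# Hedged sketch of the rules implemented above (not part of the original
# source): Series and dict inputs contribute their labels, which are union-ed;
# raw one-dimensional arrays only contribute a length check; a collection of
# bare scalars is rejected because no index can be inferred.
# >>> extract_index([Series([1], index=['a']), Series([2], index=['b'])])
# Index(['a', 'b'], dtype='object')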
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
# GH#21861
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except (ValueError, TypeError):
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data._ixs(i, axis=1).values
for i, col in enumerate(data.columns) if col in columns]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], compat.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = ibase.default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index)) and
data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = ensure_index(arr_columns).get_indexer(columns)
arr_columns = ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_objs_combined_axis
if columns is None:
columns = _get_objs_combined_axis(data, sort=False)
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = ibase.default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = com.values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, OrderedDict) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = ibase.default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('{col:d} columns passed, passed data had '
'{con} columns'.format(col=len(columns),
con=len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
def _get_names_from_index(data):
has_some_name = any(getattr(s, 'name', None) is not None for s in data)
if not has_some_name:
return ibase.default_index(len(data))
index = lrange(len(data))
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
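# Hedged illustration (not part of the original source): named Series keep
# their name and unnamed ones are numbered in order of appearance.
# >>> _get_names_from_index([Series([1], name='x'), Series([2])])
# ['x', 'Unnamed 0']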
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
v = com.dict_compat(v)
else:
v = dict(v)
v = lib.fast_multiget(v, oindex.values, default=np.nan)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return u'{s}'.format(s=s)[:space].ljust(space)
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 28 | 3792 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
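    # Positional arguments below are, in order:
    # (n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support)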
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'Got X with X.ndim=1',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'Got X with X.ndim=1',
mcd.fit, X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
bearing/radwatch-analysis | Bar_Graph.py | 2 | 10010 | import csv
import numpy as np
import datetime
import matplotlib.pyplot as plt
metacols = 3
color_scheme = ['#00B2A5', '#D9661F', '#00B0DA', '#FDB515', '#ED4E33',
'#2D637F', '#9DAD33', '#53626F', '#EE1F60', '#6C3302',
'#C2B9A7', '#CFDD45', '#003262']
color_map = {'k40': 8, 'bi214': 5, 'tl208': 7, 'cs137': 10, 'cs134': 9}
isotope_key = ['k40', 'bi214', 'tl208', 'cs137', 'cs134']
color_scheme = np.asarray([color_scheme[color_map[key]]
for key in isotope_key])
def parse_time(date):
"""
    parse_time converts a month/day/two-digit-year date string into a
    datetime object; a missing date (marked with '-') returns an empty string.
"""
if ('-' in date):
return ''
date_list = date.split('/')
return_list = []
for item in date_list:
return_list.append(int(item))
return datetime.datetime(year=2000+return_list[2], month=return_list[0],
day=return_list[1])
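# Hedged usage sketch (not part of the original source): dates arrive as
# month/day/two-digit-year strings and come back as datetime objects, while a
# missing date marked with '-' yields an empty string.
# >>> parse_time('3/14/16')
# datetime.datetime(2016, 3, 14, 0, 0)
# >>> parse_time('-')
# ''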
def unique_sample_names(sample_names):
"""
unique_sample_names takes a given list of sample names and returns a list
of all the unique names in the list
"""
ret = []
for name in sample_names:
if name not in ret:
ret.append(name)
return ret
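# Hedged usage sketch (not part of the original source): order of first
# appearance is preserved.
# >>> unique_sample_names(['kale', 'rice', 'kale'])
# ['kale', 'rice']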
def combine_measurements(sample_array, sample_names,
sample_dates):
"""
combine_measurements acquires all the samples with the same name and
their corresponding information and combines it into one sample
that's representative of the group. It returns sample information that
contains the name of the sample group, the average of the group data and
uncertainty, and the dates of the three most recent samples.
"""
u_sample_names = unique_sample_names(sample_names)
u_sample_array = []
u_sample_names_ret = []
for u_name in u_sample_names:
lst = []
row = 0
u_sample_dates = []
for name in sample_names:
if(u_name == name):
lst.append(sample_array[row, :])
if sample_dates[row] == '':
continue
u_sample_dates.append(sample_dates[row])
row += 1
u_sample_dates = np.sort(u_sample_dates)
if len(u_sample_dates):
u_sample_dates_ = ' ('+str(len(u_sample_dates))+')'
else:
u_sample_dates_ = ' ('+str(1)+')'
for date in u_sample_dates[-3:]:
u_sample_dates_ += "\n"+date.strftime('%m-%d-%y')
lst = np.asarray(lst)
# Perform sample group averaging with uncertainty standardization
k40avg = []
k40unc = []
bi214avg = []
bi214unc = []
tl208avg = []
tl208unc = []
cs137avg = []
cs137unc = []
cs134avg = []
cs134unc = []
sample_summary = []
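        # Average each isotope over the sample group: measurements at or below
        # their uncertainty are treated as non-detections and contribute zero
        # to the mean, while uncertainties are combined in quadrature and
        # scaled by 1/N, i.e. sqrt(sum(sigma_i**2)) / N.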
for measure in lst:
if measure[0] <= measure[1]:
k40avg.append(0)
else:
k40avg.append(measure[0])
k40unc.append(measure[1]**2)
if measure[2] <= measure[3]:
bi214avg.append(0)
else:
bi214avg.append(measure[2])
bi214unc.append(measure[3]**2)
if measure[4] <= measure[5]:
tl208avg.append(0)
else:
tl208avg.append(measure[4])
tl208unc.append(measure[5]**2)
if measure[6] <= measure[7]:
cs137avg.append(0)
else:
cs137avg.append(measure[6])
cs137unc.append(measure[7]**2)
if measure[8] <= measure[9]:
cs134avg.append(0)
else:
cs134avg.append(measure[8])
cs134unc.append(measure[9]**2)
sample_summary.extend([np.mean(k40avg),
np.sqrt(sum(k40unc)) / len(k40unc)])
sample_summary.extend([np.mean(bi214avg),
np.sqrt(sum(bi214unc)) / len(bi214unc)])
sample_summary.extend([np.mean(tl208avg),
np.sqrt(sum(tl208unc)) / len(tl208unc)])
sample_summary.extend([np.mean(cs137avg),
np.sqrt(sum(cs137unc)) / len(cs137unc)])
sample_summary.extend([np.mean(cs134avg),
np.sqrt(sum(cs134unc)) / len(cs134unc)])
u_sample_array.append(sample_summary)
u_sample_names_ret.append(u_name + u_sample_dates_)
return np.asarray(u_sample_array), u_sample_names_ret
def create_barerror_plot(csv_file, title, log=True):
"""
create_barerror_plot reads a csv file and takes a title input and prepares
the information from the csv file for generating a bar plot. This
preparation includes cleaning the sample names of the UCB title and simply
showing the sample type.
"""
sample_list = []
name_list = []
date_list = []
header = []
with open(csv_file) as csvfile:
parser = csv.reader(csvfile)
header = parser.__next__()
dictparser = csv.DictReader(csvfile, header)
for row in dictparser:
tmp_list = []
if 'recal' in row[header[0]]:
# Remove recal from name
label = str(row[header[0]][7:-6])
else:
label = str(row[header[0]][7:])
name_list.append(label)
date_list.append(parse_time(row[header[1]]))
for ind in range(metacols, 2 * len(isotope_key) + 2, 2):
tmp_list.extend([float(row[header[ind]]),
float(row[header[ind+1]])])
sample_list.append(tmp_list)
sample_list = np.asarray(sample_list)
legend_key = []
sample_list, name_list = combine_measurements(sample_array=sample_list,
sample_names=name_list,
sample_dates=date_list)
data = np.zeros((len(name_list), int(sample_list.shape[1] / 2.)))
error = np.zeros((len(name_list), int(sample_list.shape[1] / 2.)))
loop = 0
# Create data and error lists for simple bar graph assembly
for item in range(0, sample_list.shape[1], 2):
legend_key.append(header[metacols + item])
data[:, loop] = sample_list[:, item]
error[:, loop] = sample_list[:, item + 1]
loop += 1
ax, fig = generate_barerror_logy(sample_names=name_list, data=data,
error=error, legend_key=legend_key,
title=title, log=log)
def generate_barerror_logy(sample_names, data, error, legend_key, title,
log=True):
"""
generate_barerror_logy generates a bar graph with error bars. Bars are
are generated on a log plot with arrows indicating a detection limit and
bars representing a concentration value.
"""
number_samples = len(sample_names)
index = np.arange(0.5, number_samples, dtype=np.float64)
width = float(0.15)
fig, ax = plt.subplots()
axis = []
mins = np.amin(data[np.nonzero(data)])
for sample in range(0, len(legend_key)):
error_color = []
for i in range(len(data[:, sample])):
# Determine whether to present a bar or detection limit
if data[:, sample][i] <= error[:, sample][i]:
data[:, sample][i] = 0
error_color.append(color_scheme[sample])
else:
error_color.append('black')
args = np.zeros((0))
left_edge = index + float(width) * float(sample)
if np.amin(data[:, sample]) == 0:
args = np.where(data[:, sample] == 0)
data[args, sample] += 1E-4
draw_arrows(axes=ax, xlocs=(left_edge + 0.5 * float(width))[args],
ylocs=error[args, sample], color=color_scheme[sample])
for pos, val, err, color in zip(left_edge, data[:, sample],
error[:, sample], error_color):
ax.errorbar(pos + 0.075, val, err, color=color)
axis.append(ax.bar(left=left_edge, height=tuple(data[:, sample]),
width=width, color=color_scheme[sample],
edgecolor="none", log=log))
ylims = ax.get_ylim()
upper_mult = 1
if log:
upper_mult = 10
ax.set_ylim([mins / 10, upper_mult * ylims[1]])
ax.set_xticks(index + float(len(legend_key)) / 2. * width)
ax.yaxis.set_tick_params(labelsize=18)
ax.set_xticklabels(sample_names, fontsize=18)
ax.tick_params(axis='x', color='w')
ax.legend([a[0] for a in axis], legend_key, loc='upper left')
ax.annotate('', xy=(0.88, 0.8999), xycoords='axes fraction',
xytext=(0.88, 0.9), textcoords='axes fraction',
arrowprops=dict(edgecolor='k', facecolor='k',
arrowstyle='-|>'))
ax.annotate('Detection Limit', xy=(0.888, 0.905), xytext=(0.888, 0.905),
textcoords='axes fraction', ha='left', va='center',
fontsize=20)
ax.set_title(title, fontsize=30)
ax.set_ylabel('Specific Activity' + legend_key[0].split(' ')[1],
fontsize=30)
plt.gcf().subplots_adjust(bottom=0.15, left=0.05, right=0.95)
plt.show()
return ax, fig
def draw_arrows(axes, xlocs, ylocs, color):
"""
draw_arrows places arrow hats for measurement detection limits to
distinguish them from regular error bars.
"""
num_els = len(xlocs)
if num_els == 0:
return
if len(ylocs.shape) > 1:
ylocs = np.squeeze(ylocs, axis=(0,))
for index in range(0, num_els):
dy = 1e-10
axes.annotate("", xy=(xlocs[index], ylocs[index] - dy),
xycoords='data', xytext=(xlocs[index], ylocs[index]),
textcoords='data', arrowprops=dict(edgecolor=color,
facecolor=color, arrowstyle="-|>"))
return
create_barerror_plot('Sampling_Table.csv', 'Sample Summary')
| mit |
sgenoud/scikit-learn | examples/neighbors/plot_regression.py | 7 | 1372 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print __doc__
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD, (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import pylab as pl
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
pl.subplot(2, 1, i + 1)
pl.scatter(X, y, c='k', label='data')
pl.plot(T, y_, c='g', label='prediction')
pl.axis('tight')
pl.legend()
pl.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
pl.show()
| bsd-3-clause |
sanketloke/scikit-learn | sklearn/feature_selection/tests/test_base.py | 143 | 3670 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
carrillo/scikit-learn | sklearn/externals/joblib/__init__.py | 72 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
markm541374/gpbo | setup.py | 1 | 2236 | from setuptools import setup
from setuptools.extension import Extension
with open('gpbo/VERSION') as version_file:
version = version_file.read().strip()
def readme():
with open('README.md') as f:
return f.read()
compile_flags = ['-O3']
from numpy import get_include
extensions = [
Extension(name ="gpbo/core/ESutils",
sources = ["gpbo/core/ESutils.c"],
include_dirs = ['.','core',get_include()],
extra_compile_args=compile_flags
),
Extension(name ="gpbo/core/GPdc",
sources = ["gpbo/core/GPdc.c"],
include_dirs = ['.','core',get_include()],
extra_compile_args=compile_flags
),
Extension(name ="gpbo/core/PES",
sources = ["gpbo/core/PES.c"],
include_dirs = ['.','core',get_include()],
extra_compile_args=compile_flags
),
Extension(name ="gpbo/core/eprop",
sources = ["gpbo/core/eprop.c"],
include_dirs = ['.','core',get_include()],
extra_compile_args=compile_flags
),
Extension(name ="gpbo/core/slice",
sources = ["gpbo/core/slice.c"],
include_dirs = ['.','core',get_include()],
extra_compile_args=compile_flags
),
Extension(name ="gpbo/core/acquisitions",
sources = ["gpbo/core/acquisitions.c"],
include_dirs = ['.','core',get_include()],
extra_compile_args=compile_flags
),
Extension(name ="gpbo/core/optutils",
sources = ["gpbo/core/optutils.c"],
include_dirs = ['.','core',get_include()],
extra_compile_args=compile_flags
)
]
setup(name='gpbo',
version=version,
description='a package',
long_description=readme(),
url='https://github.com/markm541374/gpbo',
author='markm541374',
license='MIT',
packages=['gpbo','gpbo.core','gpbo.examples','gpbo.exps'],
package_dir={'gpbo':'gpbo'},
package_data={'gpbo':['cproj/*','VERSION','README.rst']},
install_requires=['numpy','scipy','tqdm','direct','matplotlib','pandas','emcee','cvxopt','cma','sklearn'],
ext_modules= extensions,
zip_safe=False)
| agpl-3.0 |
gkdb/gkdb | gkdb/core/ids_checks.py | 1 | 11263 | from itertools import chain
import json
import numpy as np
import scipy as sc
from scipy.interpolate import interp1d
from IPython import embed
from gkdb.core.equilibrium import get_values_min_max_consistency_check, calculate_a_N, check_r_minor_norm_shape_consistency
allowed_codes = ['GKW', 'GENE', 'test']
error_msg = lambda errors: 'Entry does not meet GKDB definition: {!s}'.format(errors)
def check_wrapper(check_function, ids, errors, *args, on_disallowance=None, **kwargs):
allow_entry = check_function(ids, errors, *args, **kwargs)
if on_disallowance == 'raise_immediately' and allow_entry is not True:
raise Exception(error_msg(errors))
return allow_entry
def check_json(json_path, on_disallowance='raise_at_end'):
with open(json_path) as file:
ids = json.load(file)
allow_entry = check_ids_entry(ids, on_disallowance=on_disallowance)
return allow_entry
def check_ids_entry(ids, on_disallowance='raise_at_end'):
if on_disallowance not in ['raise_immediately', 'raise_at_end', 'print_at_end']:
raise Exception
allow_entry = True
errors = []
num_sp = len(ids['species'])
electron = None
allow_entry &= check_wrapper(check_code_allowed, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_electron_definition, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_quasineutrality, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_centrifugal, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_magnetic_flutter, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_magnetic_compression, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_number_of_modes, ids, errors, on_disallowance=on_disallowance)
#allow_entry &= check_wrapper(check_growth_rate_tolerance, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_poloidal_angle_grid_bounds, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_poloidal_angle_grid_lengths, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_phi_rotation, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_r_minor_norm_shape_consistency, ids, errors, on_disallowance=on_disallowance)
allow_entry &= check_wrapper(check_inconsistent_curvature_drift, ids, errors, on_disallowance=on_disallowance)
if not allow_entry:
if on_disallowance == 'raise_at_end':
raise Exception(error_msg(errors))
elif on_disallowance == 'print_at_end':
print(error_msg(errors))
return allow_entry
def check_code_allowed(ids, errors):
allow_entry = True
if not ids['code']['name'] in allowed_codes:
allow_entry = False
errors.append("Code '{!s}' not in allowed codes {!s}"
.format(ids['code']['name'], allowed_codes))
return allow_entry
electron_mandatory = {'mass_norm': 2.7237e-4,
'temperature_norm': 1,
'density_norm': 1}
def check_electron_definition(ids, errors):
    allow_entry = True
    electron = None
for spec in ids['species']:
if spec['charge_norm'] == -1:
electron = spec
if electron is None:
allow_entry = False
errors.append('Electron species not found')
else:
for field, val in electron_mandatory.items():
if not np.isclose(electron[field], val):
allow_entry = False
errors.append("Invalid value for electron species field '{!s}'".format(field))
return allow_entry
def check_quasineutrality(ids, errors):
allow_entry = True
Zs = [spec['charge_norm'] for spec in ids['species']]
ns = [spec['density_norm'] for spec in ids['species']]
RLns = [spec['density_log_gradient_norm'] for spec in ids['species']]
quasi = np.isclose(sum([Z * n for Z, n in zip(Zs, ns)]), 0)
quasi_grad = np.isclose(sum([Z * n * RLn for Z, n, RLn in zip(Zs, ns, RLns)]), 0)
if not quasi:
allow_entry = False
errors.append("Entry is not quasineutral! Zn = {!s} and ns = {!s}".format(Zs, ns))
if not quasi_grad:
allow_entry = False
        errors.append("Entry is not quasineutral for gradients! Zs = {!s}, ns = {!s} and RLns = {!s}".format(Zs, ns, RLns))
return allow_entry
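# Worked example (added comment): for a pure deuterium plasma with
# GKDB-normalised electrons, Zs = [-1, 1] and ns = [1, 1], so sum(Z * n) = 0
# and the entry is quasineutral; the same sum weighted by the logarithmic
# density gradients RLns must also vanish for the gradient check to pass.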
def check_centrifugal(ids, errors):
allow_entry = True
u_N = ids['species_all']['velocity_tor_norm']
ms = [spec['mass_norm'] for spec in ids['species']]
Ts = [spec['temperature_norm'] for spec in ids['species']]
Mach = u_N * np.sqrt([m / T for m, T in zip(ms, Ts)])
Mach_bound = 0.2
if any(Mach > Mach_bound) and not ids['model']['include_centrifugal_effects']:
allow_entry = False
errors.append('Species with Mach > {!s} and include_centrifugal_effects is False.'.format(Mach_bound))
return allow_entry
def check_magnetic_flutter(ids, errors):
allow_entry = True
if ids['species_all']['beta_reference'] > 0:
if not ids['model']['include_a_field_parallel']:
allow_entry = False
errors.append('include_a_field_parallel should be true if beta_reference > 0')
return allow_entry
def check_magnetic_compression(ids, errors):
allow_entry = True
if ids['species_all']['beta_reference'] > 0.5:
if not ids['model']['include_b_field_parallel']:
allow_entry = False
            errors.append('include_b_field_parallel should be true if beta_reference > 0.5')
return allow_entry
def check_number_of_modes(ids, errors):
allow_entry = True
non_linear_run = ids['model']['non_linear_run']
initial_value_run = ids['model']['initial_value_run']
if not non_linear_run:
for ii, wv in enumerate(ids['wavevector']):
num_eigenmodes = len(wv['eigenmode'])
if initial_value_run:
if num_eigenmodes != 1:
allow_entry = False
errors.append('For an initial value run, the number of eigenmodes per wavevector should be 1, wavevector {!s} has {!s} eigenmodes'.format(ii, num_eigenmodes))
else:
if num_eigenmodes < 1:
allow_entry = False
errors.append('For an eigenvalue run, the number of eigenmodes per wavevector should be at least 1, wavevector {!s} has {!s} eigenmodes'.format(ii, num_eigenmodes))
return allow_entry
def check_growth_rate_tolerance(ids, errors):
growth_rate_tolerance_bound = 10
allow_entry = True
for ii, wv in enumerate(ids['wavevector']):
for jj, eig in enumerate(wv['eigenmode']):
if eig['growth_rate_tolerance'] > growth_rate_tolerance_bound:
allow_entry = False
errors.append('Growth rate tolerance has to be under {!s}%. Is {!s} for wavevector {!s} eigenmode {!s}'.format(growth_rate_tolerance_bound, eig['growth_rate_tolerance'], ii, jj))
return allow_entry
def is_monotonic(array):
return all(np.diff(array) > 0)
def check_moment_rotation(poloidal_grid, phi_potential_im, phi_theta_0_bound, check_visually=False):
#xr = [poloidal_grid[ii] for ii in [176-1-20, 176-1, 176, 176+20]]
#yr = [phi_potential_im[ii] for ii in [176-1-20, 176-1, 176, 176+20]]
try:
p_ind = poloidal_grid.index(0)
except ValueError:
f = interp1d(poloidal_grid, phi_potential_im, kind='cubic')
phi_theta_0 = f(0)
if check_visually:
import matplotlib.pyplot as plt
plt.scatter(poloidal_grid, phi_potential_im)
x = np.linspace(poloidal_grid[0], poloidal_grid[-1], 100)
plt.plot(x, f(x))
plt.vlines(0, min(phi_potential_im), max(phi_potential_im), linestyles='--')
plt.hlines(phi_theta_0, x[0], x[-1], linestyles='--')
plt.title('({!s}, {!s})'.format(0, phi_theta_0))
plt.show()
else:
phi_theta_0 = phi_potential_im[p_ind]
if abs(phi_theta_0) < phi_theta_0_bound:
rotation_okay = True
else:
rotation_okay = False
return rotation_okay
def check_monoticity(ids, errors):
allow_entry = True
for ii, wv in enumerate(ids['wavevector']):
for jj, eig in enumerate(wv['eigenmode']):
grid = eig['poloidal_angle']
if not is_monotonic(grid):
allow_entry = False
                errors.append('Poloidal angle grid should be monotonically increasing. For wavevector {!s} eigenmode {!s} it is not'.format(ii, jj))
return allow_entry
def check_poloidal_angle_grid_bounds(ids, errors):
allow_entry = True
non_linear_run = ids['model']['non_linear_run']
for ii, wv in enumerate(ids['wavevector']):
for jj, eig in enumerate(wv['eigenmode']):
if not non_linear_run:
grid = eig['poloidal_angle']
poloidal_turns = wv['poloidal_turns']
if not all([(el >= -poloidal_turns * np.pi) and el <= poloidal_turns * np.pi for el in grid]):
allow_entry = False
errors.append('Poloidal grid out of bounds! Should be between [-Np * pi, Np * pi]. For wavevector {!s} eigenmode {!s} it is not'.format(ii, jj))
return allow_entry
def check_phi_rotation(ids, errors):
allow_entry = True
for ii, wv in enumerate(ids['wavevector']):
for jj, eig in enumerate(wv['eigenmode']):
grid = eig['poloidal_angle']
if 'phi_potential_perturbed_norm_imaginary' in eig:
if not check_moment_rotation(grid, eig['phi_potential_perturbed_norm_imaginary'], 1e-3):
allow_entry = False
                    errors.append('Poloidal grid not rotated correctly! Im(phi(theta=0)) != 0 for wavevector {!s} eigenmode {!s}'.format(ii, jj))
return allow_entry
def check_poloidal_angle_grid_lengths(ids, errors):
allow_entry = True
non_linear_run = ids['model']['non_linear_run']
for ii, wv in enumerate(ids['wavevector']):
for jj, eig in enumerate(wv['eigenmode']):
grid = eig['poloidal_angle']
check_arrays_in = eig.items()
if 'moments_norm_rotating_frame' in eig:
check_arrays_in = chain(check_arrays_in, *[mom.items() for mom in eig['moments_norm_rotating_frame']])
for field, val in check_arrays_in:
if isinstance(val, list):
if len(val) != len(grid) and field not in ['moments_norm_rotating_frame', 'fluxes_norm']:
allow_entry = False
                        errors.append('Field {!s} for wavevector {!s} eigenmode {!s} is not the same length as poloidal_grid'.format(field, ii, jj))
return allow_entry
def check_inconsistent_curvature_drift(ids, errors):
allow_entry = True
if ids['model']['include_b_field_parallel'] is True:
if ids['model']['inconsistent_curvature_drift'] is True:
allow_entry = False
            errors.append('inconsistent_curvature_drift must be False if include_b_field_parallel is True.')
return allow_entry
| mit |
jseabold/statsmodels | statsmodels/stats/tests/test_influence.py | 5 | 8984 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 13:18:12 2018
Author: Josef Perktold
"""
from statsmodels.compat.pandas import testing as pdt
import os.path
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
from statsmodels.regression.linear_model import OLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.stats.outliers_influence import MLEInfluence
cur_dir = os.path.abspath(os.path.dirname(__file__))
file_name = 'binary_constrict.csv'
file_path = os.path.join(cur_dir, 'results', file_name)
data_bin = pd.read_csv(file_path, index_col=0)
file_name = 'results_influence_logit.csv'
file_path = os.path.join(cur_dir, 'results', file_name)
results_sas_df = pd.read_csv(file_path, index_col=0)
def test_influence_glm_bernoulli():
# example uses Finney's data and is used in Pregibon 1981
df = data_bin
results_sas = np.asarray(results_sas_df)
res = GLM(df['constrict'], df[['const', 'log_rate', 'log_volumne']],
family=families.Binomial()).fit(attach_wls=True, atol=1e-10)
infl = res.get_influence(observed=False)
k_vars = 3
assert_allclose(infl.dfbetas, results_sas[:, 5:8], atol=1e-4)
assert_allclose(infl.d_params, results_sas[:, 5:8] * res.bse.values, atol=1e-4)
assert_allclose(infl.cooks_distance[0] * k_vars, results_sas[:, 8], atol=6e-5)
assert_allclose(infl.hat_matrix_diag, results_sas[:, 4], atol=6e-5)
c_bar = infl.cooks_distance[0] * 3 * (1 - infl.hat_matrix_diag)
assert_allclose(c_bar, results_sas[:, 9], atol=6e-5)
class InfluenceCompareExact(object):
# Mixin to compare and test two Influence instances
def test_basics(self):
infl1 = self.infl1
infl0 = self.infl0
assert_allclose(infl0.hat_matrix_diag, infl1.hat_matrix_diag,
rtol=1e-12)
assert_allclose(infl0.resid_studentized,
infl1.resid_studentized, rtol=1e-12, atol=1e-7)
cd_rtol = getattr(self, 'cd_rtol', 1e-7)
assert_allclose(infl0.cooks_distance[0], infl1.cooks_distance[0],
rtol=cd_rtol)
assert_allclose(infl0.dfbetas, infl1.dfbetas, rtol=1e-9, atol=5e-9)
assert_allclose(infl0.d_params, infl1.d_params, rtol=1e-9, atol=5e-9)
assert_allclose(infl0.d_fittedvalues, infl1.d_fittedvalues, rtol=5e-9)
assert_allclose(infl0.d_fittedvalues_scaled,
infl1.d_fittedvalues_scaled, rtol=5e-9)
@pytest.mark.smoke
@pytest.mark.matplotlib
def test_plots(self, close_figures):
infl1 = self.infl1
infl0 = self.infl0
fig = infl0.plot_influence(external=False)
fig = infl1.plot_influence(external=False)
fig = infl0.plot_index('resid', threshold=0.2, title='')
fig = infl1.plot_index('resid', threshold=0.2, title='')
fig = infl0.plot_index('dfbeta', idx=1, threshold=0.2, title='')
fig = infl1.plot_index('dfbeta', idx=1, threshold=0.2, title='')
fig = infl0.plot_index('cook', idx=1, threshold=0.2, title='')
fig = infl1.plot_index('cook', idx=1, threshold=0.2, title='')
fig = infl0.plot_index('hat', idx=1, threshold=0.2, title='')
fig = infl1.plot_index('hat', idx=1, threshold=0.2, title='')
def test_summary(self):
infl1 = self.infl1
infl0 = self.infl0
df0 = infl0.summary_frame()
df1 = infl1.summary_frame()
assert_allclose(df0.values, df1.values, rtol=5e-5)
pdt.assert_index_equal(df0.index, df1.index)
def _check_looo(self):
infl = self.infl1
# unwrap if needed
results = getattr(infl.results, '_results', infl.results)
res_looo = infl._res_looo
mask_infl = infl.cooks_distance[0] > 2 * infl.cooks_distance[0].std()
mask_low = ~mask_infl
diff_params = results.params - res_looo['params']
assert_allclose(infl.d_params[mask_low], diff_params[mask_low], atol=0.05)
assert_allclose(infl.params_one[mask_low], res_looo['params'][mask_low], rtol=0.01)
class TestInfluenceLogitGLMMLE(InfluenceCompareExact):
@classmethod
def setup_class(cls):
df = data_bin
res = GLM(df['constrict'], df[['const', 'log_rate', 'log_volumne']],
family=families.Binomial()).fit(attach_wls=True, atol=1e-10)
cls.infl1 = res.get_influence()
cls.infl0 = MLEInfluence(res)
def test_looo(self):
_check_looo(self)
class TestInfluenceBinomialGLMMLE(InfluenceCompareExact):
# example based on Williams and R docs
@classmethod
def setup_class(cls):
yi = np.array([0, 2, 14, 19, 30])
ni = 40 * np.ones(len(yi))
xi = np.arange(1, len(yi) + 1)
exog = np.column_stack((np.ones(len(yi)), xi))
endog = np.column_stack((yi, ni - yi))
res = GLM(endog, exog, family=families.Binomial()).fit()
cls.infl1 = res.get_influence()
cls.infl0 = MLEInfluence(res)
cls.cd_rtol = 5e-5
def test_looo(self):
_check_looo(self)
def test_r(self):
# values from R,
# > xi <- 1:5
# > yi <- c(0,2,14,19,30) # number of mice responding to dose xi
# > mi <- rep(40, 5) # number of mice exposed
# > glmI <- glm(cbind(yi, mi -yi) ~ xi, family = binomial)
# > imI <- influence.measures(glmI)
# > t(imI$infmat)
        # dfbeta/dfbetas and dffits do not make sense to me and are further away from
# looo than mine
# resid seem to be resid_deviance based and not resid_pearson
# I did not compare cov.r
infl1 = self.infl1
cooks_d = [0.25220202795934726, 0.26107981497746285, 1.28985614424132389,
0.08449722285516942, 0.36362110845918005]
hat = [0.2594393406119333, 0.3696442663244837, 0.3535768402250521,
0.389209198535791057, 0.6281303543027403]
assert_allclose(infl1.hat_matrix_diag, hat, rtol=5e-6)
assert_allclose(infl1.cooks_distance[0], cooks_d, rtol=1e-5)
class TestInfluenceGaussianGLMMLE(InfluenceCompareExact):
@classmethod
def setup_class(cls):
from .test_diagnostic import get_duncan_data
endog, exog, labels = get_duncan_data()
data = pd.DataFrame(np.column_stack((endog, exog)),
columns='y const var1 var2'.split(),
index=labels)
res = GLM.from_formula('y ~ const + var1 + var2 - 1', data).fit()
#res = GLM(endog, exog).fit()
cls.infl1 = res.get_influence()
cls.infl0 = MLEInfluence(res)
def test_looo(self):
_check_looo(self)
class TestInfluenceGaussianGLMOLS(InfluenceCompareExact):
@classmethod
def setup_class(cls):
from .test_diagnostic import get_duncan_data
endog, exog, labels = get_duncan_data()
data = pd.DataFrame(np.column_stack((endog, exog)),
columns='y const var1 var2'.split(),
index=labels)
res0 = GLM.from_formula('y ~ const + var1 + var2 - 1', data).fit()
res1 = OLS.from_formula('y ~ const + var1 + var2 - 1', data).fit()
cls.infl1 = res1.get_influence()
cls.infl0 = res0.get_influence()
def test_basics(self):
# needs to override attributes that are not equivalent,
# i.e. not available or different definition like external vs internal
infl1 = self.infl1
infl0 = self.infl0
assert_allclose(infl0.hat_matrix_diag, infl1.hat_matrix_diag,
rtol=1e-12)
assert_allclose(infl0.resid_studentized,
infl1.resid_studentized, rtol=1e-12, atol=1e-7)
assert_allclose(infl0.cooks_distance, infl1.cooks_distance, rtol=1e-7)
assert_allclose(infl0.dfbetas, infl1.dfbetas, rtol=0.1) # changed
# OLSInfluence only has looo dfbeta/d_params
assert_allclose(infl0.d_params, infl1.dfbeta, rtol=1e-9, atol=1e-14)
# d_fittedvalues is not available in OLSInfluence, i.e. only scaled dffits
# assert_allclose(infl0.d_fittedvalues, infl1.d_fittedvalues, rtol=1e-9)
assert_allclose(infl0.d_fittedvalues_scaled,
infl1.dffits_internal[0], rtol=1e-9)
# specific to linear link
assert_allclose(infl0.d_linpred,
infl0.d_fittedvalues, rtol=1e-12)
assert_allclose(infl0.d_linpred_scaled,
infl0.d_fittedvalues_scaled, rtol=1e-12)
def test_summary(self):
infl1 = self.infl1
infl0 = self.infl0
df0 = infl0.summary_frame()
df1 = infl1.summary_frame()
# just some basic check on overlap except for dfbetas
cols = ['cooks_d', 'standard_resid', 'hat_diag', 'dffits_internal']
assert_allclose(df0[cols].values, df1[cols].values, rtol=1e-5)
pdt.assert_index_equal(df0.index, df1.index)
| bsd-3-clause |
sfeeney/neuRRaLy | optimize_architecture.py | 1 | 32631 | import numpy as np
import numpy.random as npr
import astropy.stats as aps
import astropy.io.fits as apf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as mp
import subprocess as sp
import scipy.stats as ss
import scipy.signal as si
import matplotlib.cm as mpcm
import matplotlib.colors as mpc
import sklearn.neural_network as sk
import random as ra
######################################################################
def allocate_jobs(n_jobs, n_procs=1, rank=0):
n_j_allocated = 0
for i in range(n_procs):
n_j_remain = n_jobs - n_j_allocated
n_p_remain = n_procs - i
n_j_to_allocate = n_j_remain / n_p_remain
if rank == i:
return range(n_j_allocated, \
n_j_allocated + n_j_to_allocate)
n_j_allocated += n_j_to_allocate
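# Worked example (added comment): allocate_jobs(10, n_procs=4) hands rank 0
# the jobs [0, 1], rank 1 [2, 3], rank 2 [4, 5, 6] and rank 3 [7, 8, 9], i.e.
# contiguous blocks whose sizes grow as the integer division absorbs the
# remainder.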
def allocate_jobs_inc_time(n_jobs, n_procs=1, rank=0):
allocated = []
for i in range(n_jobs):
if rank == np.mod(n_jobs-i, n_procs):
allocated.append(i)
return allocated
def complete_array(target_distrib, use_mpi=False):
if use_mpi:
target = np.zeros(target_distrib.shape)
mpi.COMM_WORLD.Reduce(target_distrib, target, op=mpi.SUM, \
root=0)
else:
target = target_distrib
return target
######################################################################
# plotting settings
lw = 1.5
mp.rc('font', family = 'serif')
mp.rcParams['text.latex.preamble'] = [r'\boldmath']
mp.rcParams['axes.linewidth'] = lw
mp.rcParams['lines.linewidth'] = lw
cm = mpcm.get_cmap('plasma')
# useful constants
d2s = 24.0 * 3600.0
# settings
dataset = 'gloess' # 'gloess' or 'crts' or 'sim'
use_mpi = True
include_period = True
split_to_train = True
std_by_bin = False
test_training_length = False
n_rpt = 20
if dataset == 'gloess':
set_size = 4
elif dataset == 'crts':
set_size = 19 # 9
elif dataset == 'sim':
set_size = 250 # 9
base = dataset
if include_period:
base += '_inc_per'
if use_mpi:
import mpi4py.MPI as mpi
n_procs = mpi.COMM_WORLD.Get_size()
rank = mpi.COMM_WORLD.Get_rank()
else:
n_procs = 1
rank = 0
# switch on dataset
if dataset == 'gloess':
# dataset settings
# @TODO: might be able to push n_bins higher for this cadence
data_dir = 'data/gloess/'
n_bins = 50
# get training stars
cat_hdulist = apf.open(data_dir + 'gloess_cat.fit')
cols = cat_hdulist[1].columns
data = cat_hdulist[1].data
ids = data['Name']
fehs = data['__Fe_H_']
taus = data['FPer'] * d2s
types = data['RRL']
# check for correct type
rrab = (types == 'RRab')
ids = ids[rrab]
fehs = fehs[rrab]
taus = taus[rrab]
n_lc = len(ids)
# period distribution
tau_mean = np.mean(taus)
tau_std = np.std(taus)
# plot colours set by metallicities
feh_min = np.min(fehs)
feh_max = np.max(fehs)
feh_cols = (fehs - feh_min) / (feh_max - feh_min)
# read in lightcurves
cat_hdulist = apf.open(data_dir + 'gloess_lcs.fit')
cols = cat_hdulist[1].columns
data = cat_hdulist[1].data
binned_med_lcs = []
binned_mean_lcs = []
binned_mean_lc_stds = []
if rank == 0:
fig_sum, axes_sum = mp.subplots(1, 2, figsize=(16,5))
for i in range(n_lc):
# extract quantities of interest
inds = (data['Name'] == ids[i]) & (data['Flt'] == 'V')
phase = data['Phase'][inds]
mag = data['mag'][inds]
# calculate some binned statistics; no mag errors available
bins = np.linspace(0, 1, n_bins + 1)
meds, edges, i_bins = ss.binned_statistic(phase, \
mag - np.median(mag), \
statistic='median', \
bins=bins)
centers = (edges[0:-1] + edges[1:]) / 2.0
means = np.zeros(n_bins)
stds = np.zeros(n_bins)
for j in range(n_bins):
in_bin = (i_bins - 1 == j)
if in_bin.any():
means[j] = np.mean(mag[in_bin] - np.median(mag))
binned_med_lcs.append(meds)
binned_mean_lcs.append(means)
binned_mean_lc_stds.append(stds)
if rank == 0:
axes_sum[0].plot(centers, meds, color=cm(feh_cols[i]), alpha=0.4)
axes_sum[1].plot(centers, means, color=cm(feh_cols[i]), alpha=0.4)
elif dataset == 'crts':
# dataset settings
data_dir = 'data/crts_x_sdss/'
process_raw_lcs = False
n_bins = 25
threshold = 3.5
# get map between CRTS ID and ID number, along with peak time and
# period
css_id = []
css_id_num = []
css_period = []
css_peak = []
n_skip = 2
with open(data_dir + 'RRL_params') as f:
for i, l in enumerate(f):
if (i > n_skip - 1):
vals = [val for val in l.split()]
css_id.append(vals[0])
css_period.append(float(vals[4]))
css_peak.append(float(vals[9]))
css_id_num.append(vals[10])
# get training stars
hdulist = apf.open(data_dir + 'crts_bright_feh_info.fit')
cols = hdulist[1].columns
data = hdulist[1].data
ids = data['ID'][0]
fehs = data['FEH'][0]
taus = data['PER'][0]
mus = data['DM'][0]
# check for bad metallicities
bad_feh = (fehs < -3.0)
ids = ids[~bad_feh]
fehs = fehs[~bad_feh]
taus = taus[~bad_feh]
mus = mus[~bad_feh]
n_lc = len(ids)
# period distribution
tau_mean = np.mean(taus)
tau_std = np.std(taus)
# plot colours set by metallicities
feh_min = np.min(fehs)
feh_max = np.max(fehs)
feh_cols = (fehs - feh_min) / (feh_max - feh_min)
# loop through training set
binned_med_lcs = []
binned_mean_lcs = []
binned_mean_lc_stds = []
if rank == 0:
fig_sum, axes_sum = mp.subplots(1, 2, figsize=(16,5))
for i in range(n_lc):
# match IDs
ind = (j for j,v in enumerate(css_id) if v=='CSS_'+ids[i]).next()
# build lightcurves and save in simplified format, or read in
if process_raw_lcs:
# find matching entries in files
test = sp.Popen(['/usr/bin/grep ' + css_id_num[ind] + ' ' + \
data_dir + '/CSS_RR_phot/*phot'], \
shell=True, stdout=sp.PIPE)
output, err = test.communicate()
# parse lightcurves
time = []
mag = []
mag_err = []
lines = output.splitlines()
for line in lines:
vals = line.split(',')
time.append(float(vals[1]))
mag.append(float(vals[2]))
mag_err.append(float(vals[3]))
time = np.array(time)
mag = np.array(mag)
mag_err = np.array(mag_err)
# save to file
fname = data_dir + css_id[ind] + '_' + css_id_num[ind] + \
'_lc.txt'
np.savetxt(fname, \
np.column_stack((time, mag, mag_err)), \
fmt='%19.12e', header='time mag mag_err')
else:
# read lightcurves
fname = data_dir + css_id[ind] + '_' + css_id_num[ind] + \
'_lc.txt'
lc = np.genfromtxt(fname, names=True)
time = lc['time']
mag = lc['mag']
mag_err = lc['mag_err']
# what do the phase-wrapped lightcurves look like?
# 1007116003636; 0.5485033
period = taus[i]
phase = np.mod(time - css_peak[ind], period) / period
if False:
fig, axes = mp.subplots(1, 2, figsize=(16,5))
#nu = np.linspace(1.0, 3.0, 1000)
#power = aps.LombScargle(time, mag, mag_err).power(nu)
#nu, power = aps.LombScargle(time, mag, mag_err).autopower()
nu, power = aps.LombScargle(time, mag, mag_err).autopower(minimum_frequency=1.0, maximum_frequency=3.0)
print nu[np.argmax(power)]
axes[0].plot(phase, mag, '.', color=cm(feh_cols[i]), alpha=0.4)
axes[1].plot(nu, power, 'k-')
axes[1].axvline(1.0 / period, color='r', alpha=0.7, zorder=0)
mp.suptitle(css_id[ind] + ' / ' + css_id_num[ind])
#mp.show()
# calculate some binned statistics
bins = np.linspace(0, 1, n_bins + 1)
meds, edges, i_bins = ss.binned_statistic(phase, mag - np.median(mag), \
statistic='median', \
bins=bins)
centers = (edges[0:-1] + edges[1:]) / 2.0
means = np.zeros(n_bins)
stds = np.ones(n_bins) * 1e9
for j in range(n_bins):
in_bin = (i_bins - 1 == j)
if in_bin.any():
stds[j] = np.sqrt(1.0 / np.sum(mag_err[in_bin] ** -2))
means[j] = np.average(mag[in_bin] - np.median(mag), \
weights=mag_err[in_bin] ** -2)
binned_med_lcs.append(meds)
binned_mean_lcs.append(means)
binned_mean_lc_stds.append(stds)
if rank == 0:
axes_sum[0].plot(centers, meds, color=cm(feh_cols[i]), alpha=0.4)
axes_sum[1].plot(centers, means, color=cm(feh_cols[i]), alpha=0.4)
elif dataset == 'sim':
def set_phi_13(phi_1, phi_3):
phi_31 = 2.0 * np.pi + \
np.mod(phi_1 - 3.0 * phi_3, np.pi)
inds = phi_31 > 2.0 * np.pi
phi_31[inds] = np.pi + \
np.mod(phi_1[inds] - 3.0 * phi_3[inds], np.pi)
return phi_31
def set_feh(tau, phi_31):
return -5.038 - 5.394 * tau / 24.0 / 3600.0 + \
1.345 * phi_31
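    # Note (added comment): this appears to be the standard Jurcsik & Kovacs
    # style calibration [Fe/H] = -5.038 - 5.394 * P + 1.345 * phi_31 with the
    # period P in days (hence the / 24 / 3600 applied to tau, which is held in
    # seconds).  For example, P = 0.5 d and phi_31 = 5.1 rad give
    # [Fe/H] = -5.038 - 2.697 + 6.860 ~ -0.88.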
# settings
data_dir = 'data/asas/'
n_lc = 1000 #10000
n_fc = 3
n_samples = 1000
n_bins = 100
# stellar properties
tau_mean = 0.5 * d2s
tau_std = 0.1 * d2s
sigma_noise = 0.0 # 0.01
# stats from arXiv:0906.2199
raw_stats = np.genfromtxt(data_dir + 'fourier_decomp.txt')
stats = np.zeros((2 * n_fc, raw_stats.shape[0]))
for i in range(n_fc):
stats[i, :] = raw_stats[:, 1 + 2 * i]
stats[n_fc + i, :] = raw_stats[:, 2 * (i + 1)]
# some stars have negative amplitudes and small phases:
# shift so they're all in the same quadrant
weird_phase = stats[n_fc + i, :] < np.pi
stats[i, weird_phase] *= -1
stats[n_fc + i, weird_phase] += np.pi
#mp.plot(stats[i, :], stats[n_fc + i, :], '.')
#mp.plot(stats[i, weird_phase], stats[n_fc + i, weird_phase], 'r.')
#mp.show()
fc_mean = np.mean(stats, 1)
fc_cov = np.cov(stats)
# simulate fourier components, periods and metallicities
fcs = npr.multivariate_normal(fc_mean, fc_cov, n_lc)
taus = tau_mean + npr.randn(n_lc) * tau_std
phi_31s = set_phi_13(fcs[:, n_fc + 2], fcs[:, n_fc])
fehs = set_feh(taus, phi_31s)
if False:
mp.plot(phi_31s, fehs, '.')
phi_31_plot = np.linspace(np.min(phi_31s), np.max(phi_31s))
mp.plot(phi_31_plot, set_feh(np.mean(taus), phi_31_plot))
mp.xlabel(r'$\phi_{31}$')
mp.ylabel(r'${\rm [Fe/H]}$')
mp.xlim(np.min(phi_31s), np.max(phi_31s))
mp.ylim(np.min(fehs), np.max(fehs))
mp.show()
# plot colours set by metallicities
feh_min = np.min(fehs)
feh_max = np.max(fehs)
feh_cols = (fehs - feh_min) / (feh_max - feh_min)
# simulate binned lightcurves
binned_med_lcs = []
binned_mean_lcs = []
binned_mean_lc_stds = []
if rank == 0:
fig_sum, axes_sum = mp.subplots(1, 2, figsize=(16,5))
for i in range(n_lc):
# simulate lightcurves
phase = npr.rand(n_samples)
mag = npr.randn(n_samples) * sigma_noise
for j in range(n_fc):
mag += fcs[i, j] * np.sin(2.0 * np.pi * (j + 1) * phase + \
fcs[i, n_fc + j])
#mp.scatter(phase, mag)
#mp.show()
# calculate some binned statistics
bins = np.linspace(0, 1, n_bins + 1)
meds, edges, i_bins = ss.binned_statistic(phase, mag - np.median(mag), \
statistic='median', \
bins=bins)
centers = (edges[0:-1] + edges[1:]) / 2.0
means = np.zeros(n_bins)
stds = np.ones(n_bins) * 1e9
for j in range(n_bins):
in_bin = (i_bins - 1 == j)
if in_bin.any():
stds[j] = sigma_noise / np.sqrt(np.sum(in_bin))
means[j] = np.mean(mag[in_bin] - np.median(mag))
binned_med_lcs.append(meds)
binned_mean_lcs.append(means)
binned_mean_lc_stds.append(stds)
if n_lc < 1000 and rank == 0:
axes_sum[0].plot(centers, meds, color=cm(feh_cols[i]), alpha=0.4)
axes_sum[1].plot(centers, means, color=cm(feh_cols[i]), alpha=0.4)
# convert binned stats to useful dtype
binned_med_lcs = np.array(binned_med_lcs)
binned_mean_lcs = np.array(binned_mean_lcs)
binned_mean_lc_stds = np.array(binned_mean_lc_stds)
# summarize over stars to obtain median/mean lc shape
med_lc = np.zeros(n_bins)
mean_lc = np.zeros(n_bins)
std_lc = np.zeros(n_bins)
for i in range(n_bins):
med_lc[i] = np.nanmedian(binned_med_lcs[:, i])
mean_lc[i] = np.nanmean(binned_mean_lcs[:, i])
std_lc[i] = np.nanstd(binned_mean_lcs[:, i])
# check for outliers, taking into account intrinsic scatter in each
# bin
if dataset == 'crts':
is_out = []
for i in range(n_bins):
is_out.append(np.abs(mean_lc[i] - binned_mean_lcs[:, i]) / \
np.sqrt(std_lc[i] ** 2 + \
binned_mean_lc_stds[:, i] ** 2) > threshold)
out_count = np.sum(np.array(is_out), 0)
is_out = (out_count > 0)
for i in range(n_lc):
if out_count[i] > 0 and rank == 0:
print 'reject CSS_' + ids[i]
axes_sum[0].plot(centers, binned_med_lcs[i], 'k')
axes_sum[1].plot(centers, binned_mean_lcs[i], 'k')
else:
is_out = np.zeros(n_lc, dtype='bool')
# finish off overlays of lightcurves
if rank == 0:
if n_lc < 1000:
axes_sum[0].set_xlabel('phase')
axes_sum[0].set_ylabel('median mag')
axes_sum[0].set_ylim(-1.2, 0.6)
axes_sum[1].set_xlabel('phase')
axes_sum[1].set_ylabel('iv-weighted mean mag')
axes_sum[1].set_ylim(-1.2, 0.6)
fig_sum.savefig(dataset + '_mean_median_phase-wrapped_mags.pdf', \
bbox_inches='tight')
#mp.show()
mp.close()
# recalculate clean median and mean lightcurves
med_lc_clean = np.zeros(n_bins)
mean_lc_clean = np.zeros(n_bins)
std_lc_clean = np.zeros(n_bins)
n_lc_clean = np.zeros(n_bins)
for i in range(n_bins):
med_lc_clean[i] = np.nanmedian(binned_med_lcs[~is_out, i])
mean_lc_clean[i] = np.nanmean(binned_mean_lcs[~is_out, i])
std_lc_clean[i] = np.nanstd(binned_mean_lcs[~is_out, i])
n_lc_clean[i] = np.count_nonzero(~np.isnan(binned_mean_lcs[~is_out, i]))
# plot median and mean lightcurves, with and without outliers
if rank == 0:
fig_stats, axes_stats = mp.subplots(1, 2, figsize=(16,5))
axes_stats[0].plot(centers, med_lc, 'k-', label='median')
axes_stats[0].plot(centers, med_lc_clean, 'r--', label='median (clean)')
axes_stats[1].plot(centers, mean_lc, 'k-', label='mean')
axes_stats[1].plot(centers, mean_lc_clean, 'r--', label='mean (clean)')
axes_stats[0].set_xlabel('phase')
axes_stats[0].set_ylabel('median mag')
axes_stats[0].set_ylim(-0.8, 0.3)
axes_stats[0].legend(loc='upper left')
axes_stats[1].set_xlabel('phase')
axes_stats[1].set_ylabel('mean mag')
axes_stats[1].set_ylim(-0.8, 0.3)
axes_stats[1].legend(loc='upper left')
mp.savefig(dataset + '_mean_median_lightcurves.pdf', \
bbox_inches='tight')
mp.close()
#mp.show()
# divide through median/mean lightcurve, ditching outliers
if n_lc < 1000 and rank == 0:
feh_sort = np.argsort(fehs)
fig, axes = mp.subplots(1, 2, figsize=(16,5))
for j in range(n_lc):
i = feh_sort[j]
if not is_out[i]:
axes[0].plot(centers, (binned_med_lcs[i, :] - med_lc), \
color=cm(feh_cols[i]), alpha=0.4)
axes[1].plot(centers, (binned_mean_lcs[i, :] - mean_lc), \
color=cm(feh_cols[i]), alpha=0.4)
axes[0].set_xlabel('phase')
axes[0].set_ylabel('mag / med(mag)')
axes[1].set_xlabel('phase')
axes[1].set_ylabel('mag / mean(mag)')
fig.savefig(dataset + '_mean_median_phase-wrapped_scaled_mags.pdf', \
bbox_inches='tight')
mp.close()
#mp.show()
# test out metallicity dependence in bins
n_bins_feh = 3
feh_min = [-10.0, -1.7, -1.1]
feh_max = [-1.7, -1.1, 10.0]
cols = [cm(0.2), cm(0.5), cm(0.8)]
if rank == 0:
fig, axes = mp.subplots(2, 2, figsize=(16,10))
med_lc_all_feh = med_lc_clean
mean_lc_all_feh = mean_lc_clean
n_lc_all_feh = n_lc_clean
for k in range(n_bins_feh):
# summarize over stars to obtain median/mean lc shape
feh_inds = (fehs >= feh_min[k]) & (fehs < feh_max[k]) & \
~is_out
if rank == 0:
to_fmt = 'bin {:d} has {:5.2f} < Fe/H < {:5.2f}'
print to_fmt.format(k, np.min(fehs[feh_inds]), \
np.max(fehs[feh_inds]))
med_lc = np.zeros(n_bins)
mean_lc = np.zeros(n_bins)
n_lc_bin = np.zeros(n_bins)
for i in range(n_bins):
med_lc[i] = np.nanmedian(binned_med_lcs[feh_inds, i])
mean_lc[i] = np.nanmean(binned_mean_lcs[feh_inds, i])
n_lc_bin[i] = np.count_nonzero(~np.isnan(binned_mean_lcs[feh_inds, i]))
if rank == 0:
label = '${:4.1f} '.format(feh_min[k]) + r'\leq' + \
' [Fe/H] < {:4.1f}$'.format(feh_max[k])
#axes[0, 0].plot(centers, med_lc, color=cols[k], label=label)
#axes[0, 1].plot(centers, mean_lc, color=cols[k], label=label)
#axes[1, 0].plot(centers, med_lc - med_lc_all_feh, color=cols[k], label=label)
#axes[1, 1].plot(centers, mean_lc - mean_lc_all_feh, color=cols[k], label=label)
axes[0, 0].errorbar(centers, med_lc, std_lc_clean / np.sqrt(n_lc), color=cols[k], label=label)
axes[0, 1].errorbar(centers, mean_lc, std_lc_clean / np.sqrt(n_lc), color=cols[k], label=label)
axes[1, 0].errorbar(centers, med_lc - med_lc_all_feh, std_lc_clean * np.sqrt(1.0 / n_lc + 1.0 / n_lc_bin), color=cols[k], label=label)
axes[1, 1].errorbar(centers, mean_lc - mean_lc_all_feh, std_lc_clean * np.sqrt(1.0 / n_lc + 1.0 / n_lc_bin), color=cols[k], label=label)
if rank == 0:
axes[0, 0].set_xlabel('phase')
axes[0, 0].set_ylabel('median mag')
axes[0, 0].set_ylim(-0.8, 0.3)
axes[0, 0].legend(loc='upper left', fontsize=12)
axes[0, 1].set_xlabel('phase')
axes[0, 1].set_ylabel('mean mag')
axes[0, 1].set_ylim(-0.8, 0.3)
axes[0, 1].legend(loc='upper left', fontsize=12)
axes[1, 0].set_xlabel('phase')
axes[1, 0].set_ylabel('median mag - median shape')
axes[1, 0].set_ylim(-0.08, 0.08)
axes[1, 0].legend(loc='upper center', fontsize=12)
axes[1, 1].set_xlabel('phase')
axes[1, 1].set_ylabel('mean mag - mean shape')
axes[1, 1].set_ylim(-0.08, 0.08)
axes[1, 1].legend(loc='upper center', fontsize=12)
mp.savefig(dataset + '_mean_median_lightcurves_feh_dep.pdf', \
bbox_inches='tight')
mp.close()
#mp.show()
'''
blah = np.zeros(n_bins * n_lc)
for i in range(n_lc):
blah[i * n_bins: (i + 1) * n_bins] = binned_mean_lcs[i, :]# - mean_lc
mp.hist(blah, bins = 20)
mp.show()
exit()
'''
# should we split into training and test sets or not?
if split_to_train:
# split into training and test sets. i've coded this as
# equal splits, but one set will probably always be different
n_split = int(np.floor(n_lc / float(set_size)))
if rank == 0:
print 'splitting into committee of {:d} nets'.format(n_split)
set_ids = range(n_lc)
ra.shuffle(set_ids)
if use_mpi:
set_ids = mpi.COMM_WORLD.bcast(set_ids, root=0)
test_ids = np.zeros((n_lc, n_split), dtype=bool)
test_ids[:, 0] = [i < set_size for i in set_ids]
for j in range(1, n_split - 1):
test_ids[:, j] = [i < set_size * (j + 1) and \
i >= set_size * j for i in set_ids]
test_ids[:, -1] = [i >= set_size * (n_split - 1) for i in set_ids]
for j in range(n_split):
fmt_str = 'committee {:d}: {:d} training, {:d} testing'
if rank == 0:
print fmt_str.format(j + 1, np.sum(~test_ids[:, j]), \
np.sum(test_ids[:, j]))
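    # Worked example (added comment): with 10 usable stars and set_size = 4,
    # n_split = 2, so the shuffled indices are cut into one committee that
    # tests on 4 stars and a final committee that tests on the remaining 6;
    # every star lands in exactly one test set.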
# define neural network inputs
#nn_inputs = binned_mean_lcs - mean_lc
nn_inputs = binned_mean_lcs[~is_out, :] - mean_lc
feh_mean = np.mean(fehs[~is_out])
feh_std = np.std(fehs[~is_out])
nn_outputs = (fehs[~is_out] - feh_mean) / feh_std
n_lc = np.sum(~is_out)
if std_by_bin:
for i in range(n_bins):
avg_nn_inputs = np.mean(nn_inputs[:, i])
std_nn_inputs = np.std(nn_inputs[:, i])
nn_inputs[:, i] = (nn_inputs[:, i] - avg_nn_inputs) / std_nn_inputs
else:
avg_nn_inputs = np.mean(nn_inputs.flatten())
std_nn_inputs = np.std(nn_inputs.flatten())
nn_inputs = (nn_inputs - avg_nn_inputs) / std_nn_inputs
if include_period:
nn_inputs = np.append(nn_inputs, \
(taus[~is_out, None] - tau_mean) / tau_std, 1)
# dependence on training length
if test_training_length:
#max_its = np.array([3, 10, 30, 100, 300, 1000, 3000, 10000])
#max_its = np.array([3, 10, 30, 100, 300, 1000, 3000])
#max_its = np.array([25, 50, 100, 200, 300, 400, 500])
max_its = np.array([100, 300, 1000, 3000])
n_max_its = len(max_its)
dcol = 1.0 / float(n_max_its - 1)
seeds = np.random.randint(102314, 221216, n_rpt)
if use_mpi:
mpi.COMM_WORLD.Bcast(seeds, root=0)
feh_pred = np.zeros((n_lc, n_max_its, n_rpt))
chisq = np.zeros(n_max_its)
chisq_core = np.zeros(n_max_its)
feh_pred_loc = np.zeros((n_lc, n_max_its, n_rpt))
chisq_loc = np.zeros(n_max_its)
chisq_core_loc = np.zeros(n_max_its)
n_err_bins = 100
err_bins = np.linspace(-5.0, 5.0, n_err_bins + 1)
err_bin_mids = (err_bins[1:] + err_bins[:-1]) / 2.0
binned_errs = np.zeros((n_err_bins, n_max_its))
binned_errs_loc = np.zeros((n_err_bins, n_max_its))
job_list = allocate_jobs_inc_time(n_max_its, n_procs, rank)
print 'process id {:d}: jobs'.format(rank), job_list
#for i in range(n_max_its):
for i in job_list:
n_lc_core = 0
for j in range(n_rpt):
nn = sk.MLPRegressor(hidden_layer_sizes=(20,), \
activation='logistic', solver='lbfgs', \
alpha=0.1, batch_size='auto', \
learning_rate='constant', \
learning_rate_init=0.001, power_t=0.5, \
max_iter=max_its[i], shuffle=True, \
random_state=seeds[j], tol=0.000, \
verbose=False, warm_start=False, \
momentum=0.9, nesterovs_momentum=True, \
early_stopping=False, validation_fraction=0.1, \
beta_1=0.9, beta_2=0.999, epsilon=1e-08)
for k in range(n_split):
nn.fit(nn_inputs[~test_ids[:, k], :], \
nn_outputs[~test_ids[:, k]])
feh_pred_loc[test_ids[:, k], i, j] = \
nn.predict(nn_inputs[test_ids[:, k], :])
res = feh_pred_loc[:, i, j] - nn_outputs
res_rej = np.abs(res) > 1.0
chisq_loc[i] += np.sum((res) ** 2)
chisq_core_loc[i] += np.sum((res[~res_rej]) ** 2)
n_lc_core += np.sum(~res_rej)
binned_errs_loc[:, i] += np.histogram(res, bins=err_bins)[0]
chisq_core_loc[i] /= n_lc_core
print n_lc_core, n_lc
print 'n_max_its step {:d} of {:d} complete'.format(i + 1, \
n_max_its)
chisq_loc /= n_rpt * n_lc
if use_mpi:
mpi.COMM_WORLD.barrier()
chisq = complete_array(chisq_loc, use_mpi)
chisq_core = complete_array(chisq_core_loc, use_mpi)
feh_pred = complete_array(feh_pred_loc, use_mpi)
binned_errs = complete_array(binned_errs_loc, use_mpi)
# find optimum training length
opt_ind = np.argmin(chisq)
opt_ind_core = np.argmin(chisq_core)
if rank == 0:
print 'optimum chisq {:f} at {:d}'.format(chisq[opt_ind], \
max_its[opt_ind])
print 'or {:f} at {:d} w/out failures'.format(chisq_core[opt_ind_core], \
max_its[opt_ind_core])
# plot on main process only
if rank == 0:
# plot best performing network
plot_min = -3.0 * feh_std + feh_mean
plot_max = 3.0 * feh_std + feh_mean
mp.plot([plot_min, plot_max], [plot_min, plot_max], 'k')
mp.scatter(nn_outputs * feh_std + feh_mean, \
np.mean(feh_pred[:, opt_ind_core, :], -1) * \
feh_std + feh_mean)
mp.xlabel(r'$[Fe/H]_{\rm true}$')
mp.ylabel(r'$\langle[Fe/H]_{\rm pred}\rangle$')
mp.xlim(plot_min, plot_max)
mp.ylim(plot_min, plot_max)
mp.savefig(base + '_opt_its_predictions.pdf', \
bbox_inches='tight')
mp.close()
# plot chi_sq as function of max_its
mp.semilogx(max_its, chisq, label='all predictions')
mp.semilogx(max_its, chisq_core, label='failures removed')
mp.xlabel(r'${\rm n_{its}}$')
mp.ylabel(r'$\chi^2/{\rm DOF}$')
mp.xlim(np.min(max_its), np.max(max_its))
mp.legend(loc='upper right')
mp.savefig(base + '_max_its_performance.pdf', \
bbox_inches='tight')
mp.close()
# plot residuals distribution as function of max_its
res_max = 0.0
for i in range(n_max_its):
res_max_temp = np.max(np.abs(err_bin_mids[binned_errs[:, i] > 0]))
if res_max_temp > res_max:
res_max = res_max_temp
n_binned_max = np.max(binned_errs)
fig, axes = mp.subplots(2, 2, figsize=(16, 5), sharex=True, sharey=True)
print res_max
for i in range(n_max_its):
i_row = i / 2
i_col = np.mod(i, 2)
axes[i_row, i_col].step(err_bin_mids, binned_errs[:, i])
axes[i_row, i_col].text(-0.95 * res_max, 0.9 * n_binned_max, \
'{:d} iterations'.format(max_its[i]))
axes[i_row, i_col].set_xlim(-res_max, res_max)
if i_row == 1:
axes[i_row, i_col].set_xlabel(r'$\Delta[Fe/H]$')
if i_col == 0:
axes[i_row, i_col].set_ylabel(r'$N(\Delta[Fe/H])$')
fig.subplots_adjust(wspace=0, hspace=0)
mp.savefig(base + '_max_its_residuals.pdf', \
bbox_inches='tight')
if use_mpi:
mpi.Finalize()
exit()
# dependence on alpha and n_hidden
n_grid_hid = 10
n_grid_alpha = 25
dcol_hid = 1.0 / float(n_grid_hid - 1)
dcol_alpha = 1.0 / float(n_grid_alpha - 1)
n_hidden = np.linspace(1, 10, n_grid_hid, dtype=int) * 100
n_hidden = np.linspace(1, 10, n_grid_hid, dtype=int) * 4
#alpha = np.logspace(-7, 0, n_grid_alpha)
alpha = np.logspace(-4, 0, n_grid_alpha)
alpha = np.logspace(-6, 0, n_grid_alpha)
seeds = np.random.randint(102314, 221216, n_rpt)
if use_mpi:
mpi.COMM_WORLD.Bcast(seeds, root=0)
chisq = np.zeros((n_grid_hid, n_grid_alpha))
chisq_loc = np.zeros((n_grid_hid, n_grid_alpha))
feh_pred = np.zeros((n_lc, n_grid_hid, n_grid_alpha, n_rpt))
feh_pred_loc = np.zeros((n_lc, n_grid_hid, n_grid_alpha, n_rpt))
job_list = allocate_jobs(n_grid_alpha, n_procs, rank)
print 'process id {:d}: jobs'.format(rank), job_list
for i in range(n_grid_hid):
for j in job_list:
print 'n_hidden gridpoint {:d},'.format(i + 1), \
'alpha gridpoint {:d}'.format(j + 1)
for k in range(n_rpt):
#activation='tanh', solver='lbfgs', \
nn = sk.MLPRegressor(hidden_layer_sizes=(n_hidden[i],), \
activation='logistic', solver='lbfgs', \
alpha=alpha[j], batch_size='auto', \
learning_rate='constant', \
learning_rate_init=0.001, power_t=0.5, \
max_iter=100, shuffle=True, \
random_state=seeds[k], \
tol=0.0, verbose=False, warm_start=False, \
momentum=0.9, nesterovs_momentum=True, \
early_stopping=False, validation_fraction=0.1, \
beta_1=0.9, beta_2=0.999, epsilon=1e-08)
if split_to_train:
for m in range(n_split):
nn.fit(nn_inputs[~test_ids[:, m], :], \
nn_outputs[~test_ids[:, m]])
feh_pred_loc[test_ids[:, m], i, j, k] = \
nn.predict(nn_inputs[test_ids[:, m], :])
else:
nn.fit(nn_inputs, nn_outputs)
feh_pred_loc[:, i, j, k] = nn.predict(nn_inputs)
res = feh_pred_loc[:, i, j, k] - nn_outputs
chisq_loc[i, j] += np.sum(res ** 2)
chisq_loc /= n_rpt * n_lc
if use_mpi:
mpi.COMM_WORLD.barrier()
chisq = complete_array(chisq_loc, use_mpi)
feh_pred = complete_array(feh_pred_loc, use_mpi)
# find optimum n_hidden and alpha
opt_ind = np.unravel_index(np.argmin(chisq), (n_grid_hid, n_grid_alpha))
if rank == 0:
print 'optimum chisq {:f} at {:d}, {:e}'.format(chisq[opt_ind], \
n_hidden[opt_ind[0]], \
alpha[opt_ind[1]])
# save results to file! but what else to save?
#output_file = open(base + '_opt_alpha_n_hidden_predictions.dat', 'wb')
#feh_pred.tofile(output_file)
#output_file.close()
# no point duplicating plots
if rank == 0:
# plot best performing network
plot_min = -3.0 * feh_std + feh_mean
plot_max = 3.0 * feh_std + feh_mean
mp.plot([plot_min, plot_max], [plot_min, plot_max], 'k')
mp.scatter(nn_outputs * feh_std + feh_mean, \
np.mean(feh_pred[:, opt_ind[0], opt_ind[1], :], -1) * \
feh_std + feh_mean)
mp.xlabel(r'$[Fe/H]_{\rm true}$')
mp.ylabel(r'$\langle[Fe/H]_{\rm pred}\rangle$')
mp.xlim(plot_min, plot_max)
mp.ylim(plot_min, plot_max)
mp.savefig(base + '_opt_alpha_n_hidden_predictions.pdf', \
bbox_inches='tight')
mp.close()
# summary plots
fig, axes = mp.subplots(1, 2, figsize=(16, 5))
for i in range(n_grid_hid):
axes[0].semilogx(alpha, chisq[i, :], color=cm(dcol_hid * i))
for i in range(n_grid_alpha):
axes[1].plot(n_hidden, chisq[:, i], color=cm(dcol_alpha * i))
axes[0].set_xlabel(r'$\alpha$')
axes[0].set_ylabel(r'$\sum (Z_{\rm pred} - Z_{\rm true})^2$')
axes[1].set_xlabel(r'$n_{\rm hidden}$')
axes[1].set_ylabel(r'$\sum (Z_{\rm pred} - Z_{\rm true})^2$')
mp.savefig(base + '_alpha_n_hidden_1d_performance.pdf', \
bbox_inches='tight')
mp.close()
# 2D plot
fig = mp.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(chisq, cmap = mpcm.plasma, interpolation = 'nearest')
mp.colorbar(cax)
xticks = ax.get_xticks()
yticks = ax.get_yticks()
ax.set_xticklabels([''] + ['{:6.1e}'.format(alpha[int(x)]) \
for x in xticks[1:-1]])
ax.set_yticklabels([''] + ['{:d}'.format(n_hidden[int(y)]) \
for y in yticks[1:-1]])
ax.set_xlabel(r'$\alpha$')
ax.xaxis.set_label_position('top')
ax.set_ylabel(r'$n_{\rm hidden}$')
mp.savefig(base + '_alpha_n_hidden_performance.pdf', \
bbox_inches='tight')
mp.close()
# @TODO: SAVE TO FILE (networks, eventually)
# @TODO: redo alpha n_hidden with 1000 iterations
# - then turn on tolerance to see if results same w/ speedup
# @TODO: or loop over max its
# @TODO: increase n_repeat
if use_mpi:
mpi.Finalize()
| mit |
gifford-lab/bcbio-nextgen | bcbio/graph/collectl.py | 6 | 11890 | import calendar
import glob
import gzip
import math
import os.path
import re
import pandas as pd
def _parse_raw(fp, start_tstamp, end_tstamp):
import progressbar
widgets = [
os.path.basename(fp.name), ': ',
progressbar.Bar(marker='-', left='[', right=']'), ' ',
progressbar.Percentage(), ' ', progressbar.ETA(),
]
# We don't know what the file's uncompressed size will wind up being,
# so take an educated guess and ignore the AssertionError later on
# if it winds up being bigger than we guess.
bar = progressbar.ProgressBar(
widgets=widgets, maxval=os.path.getsize(fp.name) * 15)
bar.start()
bar.update(0)
tstamp = 0
hardware = {}
data = {}
for line in fp:
matches = re.search(r'^>>> (\d+).\d+ <<<', line)
if matches:
try:
bar.update(fp.tell())
except AssertionError:
pass
tstamp = int(matches.group(1))
if (tstamp >= start_tstamp) or (tstamp <= end_tstamp):
data[tstamp] = {
'disk': {},
'mem': {},
'net': {},
'proc': {},
}
continue
if line.startswith('# SubSys: '):
matches = re.search(r'\sNumCPUs: (\d+)\s+', line)
if matches:
hardware['num_cpus'] = int(matches.group(1))
continue
if line.startswith('# Kernel: '):
matches = re.search(r'\sMemory: (\d+)\s+kB', line)
if matches:
hardware['memory'] = int(math.ceil(float(matches.group(1)) / math.pow(1024.0, 2.0)))
continue
if (tstamp < start_tstamp) or (tstamp > end_tstamp):
continue
if line.startswith('cpu '):
# Don't know what the last two fields are, but they
# always seem to be 0, and collectl doesn't parse them
# in formatit::dataAnalyze().
(title, user, nice, sys, idle, wait, irq,
soft, steal) = line.split()[:9]
data[tstamp]['cpu'] = {
'user': user,
'nice': nice,
'sys': sys,
'idle': idle,
'wait': wait,
'irq': irq,
'soft': soft,
'steal': steal,
}
elif line.startswith('disk '):
(title, major, minor, node,
num_reads, reads_merged, sectors_read, msec_spent_reading,
num_writes, writes_merged, sectors_written, msec_spent_writing,
iops_in_progress, msec_spent_on_iops,
weighted_msec_spent_on_iops) = line.split()
data[tstamp]['disk'][node] = {
'num_reads': num_reads,
'reads_merged': reads_merged,
'sectors_read': sectors_read,
'msec_spent_reading': msec_spent_reading,
'num_writes': num_writes,
'writes_merged': writes_merged,
'sectors_written': sectors_written,
'msec_spent_writing': msec_spent_writing,
'iops_in_progress': iops_in_progress,
'msec_spent_on_iops': msec_spent_on_iops,
'weighted_msec_spent_on_iops': weighted_msec_spent_on_iops,
}
elif line.startswith('Net '):
# Older kernel versions don't have whitespace after
# the interface colon:
#
# Net eth0:70627391
#
# unlike newer kernels:
#
# Net eth0: 415699541
line = re.sub(r'^(Net\s+[^:]+):', r'\1: ', line)
(title, iface,
rbyte, rpkt, rerr, rdrop, rfifo,
rframe, rcomp, rmulti,
tbyte, tpkt, terr, tdrop, tfifo,
tcoll, tcarrier, tcomp) = line.split()
iface = iface.replace(':', '')
data[tstamp]['net'][iface] = {
'rbyte': rbyte,
'rpkt': rpkt,
'rerr': rerr,
'rdrop': rdrop,
'rfifo': rfifo,
'rframe': rframe,
'rcomp': rcomp,
'rmulti': rmulti,
'tbyte': tbyte,
'tpkt': tpkt,
'terr': terr,
'tdrop': tdrop,
'tfifo': tfifo,
'tcoll': tcoll,
'tcarrier': tcarrier,
'tcomp': tcomp,
}
elif line.startswith('MemTotal:'):
title, amount, unit = line.split()
data[tstamp]['mem']['total'] = amount
elif line.startswith('MemFree:'):
title, amount, unit = line.split()
data[tstamp]['mem']['free'] = amount
elif line.startswith('Buffers:'):
title, amount, unit = line.split()
data[tstamp]['mem']['buffers'] = amount
elif line.startswith('Cached:'):
title, amount, unit = line.split()
data[tstamp]['mem']['cached'] = amount
# We don't currently do anything with process data,
# so don't bother parsing it.
elif False and line.startswith('proc:'):
title_pid, rest = line.split(None, 1)
title, pid = title_pid.split(':')
if pid not in data[tstamp]['proc']:
data[tstamp]['proc'][pid] = {}
if rest.startswith('cmd '):
title, cmd = rest.split(None, 1)
data[tstamp]['proc'][pid]['cmd'] = cmd
elif rest.startswith('io read_bytes: '):
value = rest.split(':')[1].strip()
data[tstamp]['proc'][pid]['read_bytes'] = value
elif rest.startswith('io write_bytes: '):
value = rest.split(':')[1].strip()
data[tstamp]['proc'][pid]['write_bytes'] = value
bar.finish()
return hardware, data
class _CollectlGunzip(gzip.GzipFile):
"""collectl writes data to its files incrementally, and doesn't
add a CRC to the end until it rotates the log. Ignore the CRC
errors; they're innocuous in this case.
"""
def _read_eof(self):
return
def load_collectl(pattern, start_time, end_time):
"""Read data from collectl data files into a pandas DataFrame.
    :pattern: glob pattern matching the raw collectl files to read
    :start_time: datetime (UTC); samples before this time are ignored
    :end_time: datetime (UTC); samples after this time are ignored

    Returns a (DataFrame, hardware-info dict) tuple.
"""
start_tstamp = calendar.timegm(start_time.utctimetuple())
end_tstamp = calendar.timegm(end_time.utctimetuple())
cols = []
rows = []
for path in glob.glob(pattern):
hardware, raw = _parse_raw(
_CollectlGunzip(path, 'r'), start_tstamp, end_tstamp)
if not cols:
instances = {
'disk': set(),
'net': set(),
'proc': set(),
}
for tstamp, sample in raw.iteritems():
for group, items in sample.iteritems():
if group == 'disk':
instances['disk'] = instances['disk'].union(
items.keys())
elif group == 'net':
instances['net'] = instances['net'].union(
items.keys())
elif group == 'proc':
instances['proc'] = instances['proc'].union(
items.keys())
cols = ['tstamp']
cols.extend([
'cpu_{}'.format(var)
for var
in ['user', 'nice', 'sys', 'idle', 'wait',
'irq', 'soft', 'steal']
])
for node in instances['disk']:
cols.extend([
'{}_{}'.format(node, var)
for var
in ['num_reads', 'reads_merged',
'sectors_read', 'msec_spent_reading',
'num_writes', 'writes_merged',
'sectors_written', 'msec_spent_writing',
'iops_in_progress', 'msec_spent_on_iops',
'weighted_msec_spent_on_iops']
])
cols.extend([
'mem_{}'.format(var)
for var
in ['total', 'free', 'buffers', 'cached']
])
for iface in instances['net']:
cols.extend([
'{}_{}'.format(iface, var)
for var
in ['rbyte', 'rpkt', 'rerr', 'rdrop',
'rfifo', 'rframe', 'rcomp', 'rmulti',
'tbyte', 'tpkt', 'terr', 'tdrop',
'tfifo', 'tcoll', 'tcarrier', 'tcomp']
])
for pid in instances['proc']:
cols.extend([
'{}_{}'.format(pid, var)
for var
in ['name', 'read_bytes', 'write_bytes']
])
for tstamp, sample in raw.iteritems():
if ('cpu' not in sample or
'disk' not in sample or
'mem' not in sample):
# Skip incomplete samples; there might be a truncated
# sample on the end of the file.
continue
values = [tstamp]
values.extend([
sample['cpu']['user'], sample['cpu']['nice'],
sample['cpu']['sys'], sample['cpu']['idle'],
sample['cpu']['wait'], sample['cpu']['irq'],
sample['cpu']['soft'], sample['cpu']['steal'],
])
for node in instances['disk']:
data = sample['disk'].get(node, {})
values.extend([
data.get('num_reads', 0),
data.get('reads_merged', 0),
data.get('sectors_read', 0),
data.get('msec_spent_reading', 0),
data.get('num_writes', 0),
data.get('writes_merged', 0),
data.get('sectors_written', 0),
data.get('msec_spent_writing', 0),
data.get('iops_in_progress', 0),
data.get('msec_spent_on_iops', 0),
data.get('weighted_msec_spent_on_iops', 0),
])
values.extend([
sample['mem']['total'], sample['mem']['free'],
sample['mem']['buffers'], sample['mem']['cached'],
])
for iface in instances['net']:
data = sample['net'].get(iface, {})
values.extend([
data.get('rbyte', 0), data.get('rpkt', 0),
data.get('rerr', 0), data.get('rdrop', 0),
data.get('rfifo', 0), data.get('rframe', 0),
data.get('rcomp', 0), data.get('rmulti', 0),
data.get('tbyte', 0), data.get('tpkt', 0),
data.get('terr', 0), data.get('tdrop', 0),
data.get('tfifo', 0), data.get('tcoll', 0),
data.get('tcarrier', 0), data.get('tcomp', 0),
])
if 'proc' in sample:
for pid in instances['proc']:
data = sample['proc'].get(pid, {})
values.extend([
data.get('cmd', ''),
data.get('read_bytes', 0),
data.get('write_bytes', 0),
])
rows.append(values)
if len(rows) == 0:
return pd.DataFrame(columns=cols), {}
df = pd.DataFrame(rows, columns=cols)
df = df.convert_objects(convert_numeric=True)
df['tstamp'] = df['tstamp'].astype('datetime64[s]')
df.set_index('tstamp', inplace=True)
df = df.tz_localize('UTC')
return df, hardware
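# Usage sketch (added comment, not part of the original module); the glob
# pattern below is a hypothetical example -- collectl raw logs are typically
# named <host>-<date>.raw.gz:
#
#     from datetime import datetime, timedelta
#     end = datetime.utcnow()
#     start = end - timedelta(hours=12)
#     df, hardware = load_collectl('/var/log/collectl/*.raw.gz', start, end)
#     print(df.filter(like='cpu_').describe())
#
# load_collectl returns a UTC timestamp-indexed DataFrame plus a dict holding
# the node's CPU count and memory (GB) parsed from the collectl header.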
| mit |
winklerand/pandas | pandas/tests/indexes/datetimelike.py | 1 | 2371 | """ generic datetimelike tests """
import pytest
import pandas as pd
from .common import Base
import pandas.util.testing as tm
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
tm.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
assert not "length=%s" % len(idx) in str(idx)
assert "'foo'" in str(idx)
assert idx.__class__.__name__ in str(idx)
if hasattr(idx, 'tz'):
if idx.tz is not None:
assert idx.tz in str(idx)
if hasattr(idx, 'freq'):
assert "freq='%s'" % idx.freqstr in str(idx)
def test_view(self, indices):
super(DatetimeLike, self).test_view(indices)
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
def test_map_callable(self):
expected = self.index + 1
result = self.index.map(lambda x: x + 1)
tm.assert_index_equal(result, expected)
# map to NaT
result = self.index.map(lambda x: pd.NaT if x == self.index[0] else x)
expected = pd.Index([pd.NaT] + self.index[1:].tolist())
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index)])
def test_map_dictlike(self, mapper):
expected = self.index + 1
# don't compare the freqs
if isinstance(expected, pd.DatetimeIndex):
expected.freq = None
result = self.index.map(mapper(expected, self.index))
tm.assert_index_equal(result, expected)
expected = pd.Index([pd.NaT] + self.index[1:].tolist())
result = self.index.map(mapper(expected, self.index))
tm.assert_index_equal(result, expected)
# empty map; these map to np.nan because we cannot know
# to re-infer things
expected = pd.Index([pd.NaT] * len(self.index))
result = self.index.map(mapper([], []))
tm.assert_index_equal(result, expected)
| bsd-3-clause |
toobaz/pandas | pandas/io/html.py | 1 | 34063 | """:mod:`pandas.io.html` is a module containing functionality for dealing with
HTML IO.
"""
from collections import abc
import numbers
import os
import re
from pandas.compat import raise_with_traceback
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError, EmptyDataError
from pandas.core.dtypes.common import is_list_like
from pandas import Series
from pandas.io.common import _is_url, _validate_header_arg, urlopen
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
_IMPORTS = False
_HAS_BS4 = False
_HAS_LXML = False
_HAS_HTML5LIB = False
def _importers():
# import things we need
# but make this done on a first use basis
global _IMPORTS
if _IMPORTS:
return
global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
bs4 = import_optional_dependency("bs4", raise_on_missing=False, on_version="ignore")
_HAS_BS4 = bs4 is not None
lxml = import_optional_dependency(
"lxml.etree", raise_on_missing=False, on_version="ignore"
)
_HAS_LXML = lxml is not None
html5lib = import_optional_dependency(
"html5lib", raise_on_missing=False, on_version="ignore"
)
_HAS_HTML5LIB = html5lib is not None
_IMPORTS = True
#############
# READ HTML #
#############
_RE_WHITESPACE = re.compile(r"[\r\n]+|\s{2,}")
def _remove_whitespace(s, regex=_RE_WHITESPACE):
"""Replace extra whitespace inside of a string with a single space.
Parameters
----------
s : str or unicode
The string from which to remove extra whitespace.
regex : regex
The regular expression to use to remove extra whitespace.
Returns
-------
subd : str or unicode
`s` with all extra whitespace replaced with a single space.
"""
return regex.sub(" ", s.strip())
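# For example (added note), _remove_whitespace("  Price   (USD)  ") returns
# "Price (USD)": the string is stripped and the inner run of whitespace
# collapses to a single space, keeping cell text on one line.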
def _get_skiprows(skiprows):
"""Get an iterator given an integer, slice or container.
Parameters
----------
skiprows : int, slice, container
The iterator to use to skip rows; can also be a slice.
Raises
------
TypeError
* If `skiprows` is not a slice, integer, or Container
Returns
-------
it : iterable
A proper iterator to use to skip rows of a DataFrame.
"""
if isinstance(skiprows, slice):
start, step = skiprows.start or 0, skiprows.step or 1
return list(range(start, skiprows.stop, step))
elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows):
return skiprows
elif skiprows is None:
return 0
raise TypeError(
"%r is not a valid type for skipping rows" % type(skiprows).__name__
)
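# For example (added note), _get_skiprows(slice(2, 10, 2)) returns
# [2, 4, 6, 8]; an integer or list-like is passed through unchanged and None
# maps to 0 (skip nothing).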
def _read(obj):
"""Try to read from a url, file or string.
Parameters
----------
obj : str, unicode, or file-like
Returns
-------
raw_text : str
"""
if _is_url(obj):
with urlopen(obj) as url:
text = url.read()
elif hasattr(obj, "read"):
text = obj.read()
elif isinstance(obj, (str, bytes)):
text = obj
try:
if os.path.isfile(text):
with open(text, "rb") as f:
return f.read()
except (TypeError, ValueError):
pass
else:
raise TypeError("Cannot read object of type %r" % type(obj).__name__)
return text
class _HtmlFrameParser:
"""Base class for parsers that parse HTML into DataFrames.
Parameters
----------
io : str or file-like
This can be either a string of raw HTML, a valid URL using the HTTP,
FTP, or FILE protocols or a file-like object.
match : str or regex
The text to match in the document.
attrs : dict
List of HTML <table> element attributes to match.
encoding : str
Encoding to be used by parser
displayed_only : bool
Whether or not items with "display:none" should be ignored
.. versionadded:: 0.23.0
Attributes
----------
io : str or file-like
raw HTML, URL, or file-like object
match : regex
The text to match in the raw HTML
attrs : dict-like
A dictionary of valid table attributes to use to search for table
elements.
encoding : str
Encoding to be used by parser
displayed_only : bool
Whether or not items with "display:none" should be ignored
.. versionadded:: 0.23.0
Notes
-----
To subclass this class effectively you must override the following methods:
* :func:`_build_doc`
* :func:`_attr_getter`
* :func:`_text_getter`
* :func:`_parse_td`
* :func:`_parse_thead_tr`
* :func:`_parse_tbody_tr`
* :func:`_parse_tfoot_tr`
* :func:`_parse_tables`
* :func:`_equals_tag`
See each method's respective documentation for details on their
functionality.
"""
def __init__(self, io, match, attrs, encoding, displayed_only):
self.io = io
self.match = match
self.attrs = attrs
self.encoding = encoding
self.displayed_only = displayed_only
def parse_tables(self):
"""
Parse and return all tables from the DOM.
Returns
-------
list of parsed (header, body, footer) tuples from tables.
"""
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
return (self._parse_thead_tbody_tfoot(table) for table in tables)
def _attr_getter(self, obj, attr):
"""
Return the attribute value of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
attr : str or unicode
The attribute, such as "colspan"
Returns
-------
str or unicode
The attribute value.
"""
# Both lxml and BeautifulSoup have the same implementation:
return obj.get(attr)
def _text_getter(self, obj):
"""
Return the text of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
Returns
-------
text : str or unicode
The text from an individual DOM node.
"""
raise AbstractMethodError(self)
def _parse_td(self, obj):
"""Return the td elements from a row element.
Parameters
----------
obj : node-like
A DOM <tr> node.
Returns
-------
list of node-like
These are the elements of each row, i.e., the columns.
"""
raise AbstractMethodError(self)
def _parse_thead_tr(self, table):
"""
Return the list of thead row elements from the parsed table element.
Parameters
----------
table : a table element that contains zero or more thead elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tbody_tr(self, table):
"""
Return the list of tbody row elements from the parsed table element.
HTML5 table bodies consist of either 0 or more <tbody> elements (which
only contain <tr> elements) or 0 or more <tr> elements. This method
checks for both structures.
Parameters
----------
table : a table element that contains row elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tfoot_tr(self, table):
"""
Return the list of tfoot row elements from the parsed table element.
Parameters
----------
table : a table element that contains row elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tables(self, doc, match, attrs):
"""
Return all tables from the parsed DOM.
Parameters
----------
doc : the DOM from which to parse the table element.
match : str or regular expression
The text to search for in the DOM tree.
attrs : dict
A dictionary of table attributes that can be used to disambiguate
multiple tables on a page.
Raises
------
ValueError : `match` does not match any text in the document.
Returns
-------
list of node-like
HTML <table> elements to be parsed into raw data.
"""
raise AbstractMethodError(self)
def _equals_tag(self, obj, tag):
"""
Return whether an individual DOM node matches a tag
Parameters
----------
obj : node-like
A DOM node.
tag : str
Tag name to be checked for equality.
Returns
-------
boolean
Whether `obj`'s tag name is `tag`
"""
raise AbstractMethodError(self)
def _build_doc(self):
"""
Return a tree-like object that can be used to iterate over the DOM.
Returns
-------
node-like
The DOM from which to parse the table element.
"""
raise AbstractMethodError(self)
def _parse_thead_tbody_tfoot(self, table_html):
"""
Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
Returns
-------
tuple of (header, body, footer), each a list of list-of-text rows.
Notes
-----
Header and body are lists-of-lists. Top level list is a list of
rows. Each row is a list of str text.
Logic: Use <thead>, <tbody>, <tfoot> elements to identify
header, body, and footer, otherwise:
- Put all rows into body
- Move rows from top of body to header only if
all elements inside row are <th>
- Move rows from bottom of body to footer only if
all elements inside row are <th>
"""
header_rows = self._parse_thead_tr(table_html)
body_rows = self._parse_tbody_tr(table_html)
footer_rows = self._parse_tfoot_tr(table_html)
def row_is_all_th(row):
return all(self._equals_tag(t, "th") for t in self._parse_td(row))
if not header_rows:
# The table has no <thead>. Move the top all-<th> rows from
# body_rows to header_rows. (This is a common case because many
            # tables in the wild have no <thead> or <tfoot>.)
while body_rows and row_is_all_th(body_rows[0]):
header_rows.append(body_rows.pop(0))
header = self._expand_colspan_rowspan(header_rows)
body = self._expand_colspan_rowspan(body_rows)
footer = self._expand_colspan_rowspan(footer_rows)
return header, body, footer
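    # Editor's illustrative note (not from the original source): for a table
    # with no <thead>, e.g.
    #
    #   <table>
    #     <tr><th>A</th><th>B</th></tr>
    #     <tr><td>1</td><td>2</td></tr>
    #   </table>
    #
    # the first row contains only <th> cells, so _parse_thead_tbody_tfoot
    # moves it from the body into the header, yielding header [['A', 'B']]
    # and body [['1', '2']] after colspan/rowspan expansion.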
def _expand_colspan_rowspan(self, rows):
"""
Given a list of <tr>s, return a list of text rows.
Parameters
----------
rows : list of node-like
List of <tr>s
Returns
-------
list of list
Each returned row is a list of str text.
Notes
-----
Any cell with ``rowspan`` or ``colspan`` will have its contents copied
to subsequent cells.
"""
all_texts = [] # list of rows, each a list of str
remainder = [] # list of (index, text, nrows)
for tr in rows:
texts = [] # the output for this row
next_remainder = []
index = 0
tds = self._parse_td(tr)
for td in tds:
# Append texts from previous rows with rowspan>1 that come
# before this <td>
while remainder and remainder[0][0] <= index:
prev_i, prev_text, prev_rowspan = remainder.pop(0)
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
index += 1
# Append the text from this <td>, colspan times
text = _remove_whitespace(self._text_getter(td))
rowspan = int(self._attr_getter(td, "rowspan") or 1)
colspan = int(self._attr_getter(td, "colspan") or 1)
for _ in range(colspan):
texts.append(text)
if rowspan > 1:
next_remainder.append((index, text, rowspan - 1))
index += 1
# Append texts from previous rows at the final position
for prev_i, prev_text, prev_rowspan in remainder:
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
all_texts.append(texts)
remainder = next_remainder
# Append rows that only appear because the previous row had non-1
# rowspan
while remainder:
next_remainder = []
texts = []
for prev_i, prev_text, prev_rowspan in remainder:
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
all_texts.append(texts)
remainder = next_remainder
return all_texts
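    # Editor's illustrative note (not from the original source): in
    # _expand_colspan_rowspan a row such as
    #   <tr><td colspan="2">x</td><td>y</td></tr>
    # expands to ['x', 'x', 'y'], and a cell with rowspan="2" is carried in
    # `remainder` so its text is repeated at the same column index in the
    # following row.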
def _handle_hidden_tables(self, tbl_list, attr_name):
"""
Return list of tables, potentially removing hidden elements
Parameters
----------
tbl_list : list of node-like
Type of list elements will vary depending upon parser used
attr_name : str
Name of the accessor for retrieving HTML attributes
Returns
-------
list of node-like
Return type matches `tbl_list`
"""
if not self.displayed_only:
return tbl_list
return [
x
for x in tbl_list
if "display:none"
not in getattr(x, attr_name).get("style", "").replace(" ", "")
]
class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
"""HTML to DataFrame parser that uses BeautifulSoup under the hood.
See Also
--------
pandas.io.html._HtmlFrameParser
pandas.io.html._LxmlFrameParser
Notes
-----
Documentation strings for this class are in the base class
:class:`pandas.io.html._HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from bs4 import SoupStrainer
self._strainer = SoupStrainer("table")
def _parse_tables(self, doc, match, attrs):
element_name = self._strainer.name
tables = doc.find_all(element_name, attrs=attrs)
if not tables:
raise ValueError("No tables found")
result = []
unique_tables = set()
tables = self._handle_hidden_tables(tables, "attrs")
for table in tables:
if self.displayed_only:
for elem in table.find_all(style=re.compile(r"display:\s*none")):
elem.decompose()
if table not in unique_tables and table.find(text=match) is not None:
result.append(table)
unique_tables.add(table)
if not result:
raise ValueError(
"No tables found matching pattern {patt!r}".format(patt=match.pattern)
)
return result
def _text_getter(self, obj):
return obj.text
def _equals_tag(self, obj, tag):
return obj.name == tag
def _parse_td(self, row):
return row.find_all(("td", "th"), recursive=False)
def _parse_thead_tr(self, table):
return table.select("thead tr")
def _parse_tbody_tr(self, table):
from_tbody = table.select("tbody tr")
from_root = table.find_all("tr", recursive=False)
# HTML spec: at most one of these lists has content
return from_tbody + from_root
def _parse_tfoot_tr(self, table):
return table.select("tfoot tr")
def _setup_build_doc(self):
raw_text = _read(self.io)
if not raw_text:
raise ValueError("No text parsed from document: {doc}".format(doc=self.io))
return raw_text
def _build_doc(self):
from bs4 import BeautifulSoup
return BeautifulSoup(
self._setup_build_doc(), features="html5lib", from_encoding=self.encoding
)
def _build_xpath_expr(attrs):
"""Build an xpath expression to simulate bs4's ability to pass in kwargs to
search for attributes when using the lxml parser.
Parameters
----------
attrs : dict
A dict of HTML attributes. These are NOT checked for validity.
Returns
-------
expr : unicode
An XPath expression that checks for the given HTML attributes.
"""
# give class attribute as class_ because class is a python keyword
if "class_" in attrs:
attrs["class"] = attrs.pop("class_")
s = ["@{key}={val!r}".format(key=k, val=v) for k, v in attrs.items()]
return "[{expr}]".format(expr=" and ".join(s))
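def _example_build_xpath_expr():
    # Editor's illustrative sketch, not part of the original module; the name
    # of this helper is an editorial addition and nothing in pandas calls it.
    # It only shows the expression produced for a minimal attrs dict.
    expr = _build_xpath_expr({"id": "table"})
    assert expr == "[@id='table']"
    return expr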
_re_namespace = {"re": "http://exslt.org/regular-expressions"}
_valid_schemes = "http", "file", "ftp"
class _LxmlFrameParser(_HtmlFrameParser):
"""HTML to DataFrame parser that uses lxml under the hood.
Warning
-------
This parser can only handle HTTP, FTP, and FILE urls.
See Also
--------
_HtmlFrameParser
    _BeautifulSoupHtml5LibFrameParser
Notes
-----
Documentation strings for this class are in the base class
:class:`_HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _text_getter(self, obj):
return obj.text_content()
def _parse_td(self, row):
# Look for direct children only: the "row" element here may be a
# <thead> or <tfoot> (see _parse_thead_tr).
return row.xpath("./td|./th")
def _parse_tables(self, doc, match, kwargs):
pattern = match.pattern
# 1. check all descendants for the given pattern and only search tables
# 2. go up the tree until we find a table
query = "//table//*[re:test(text(), {patt!r})]/ancestor::table"
xpath_expr = query.format(patt=pattern)
# if any table attributes were given build an xpath expression to
# search for them
if kwargs:
xpath_expr += _build_xpath_expr(kwargs)
tables = doc.xpath(xpath_expr, namespaces=_re_namespace)
tables = self._handle_hidden_tables(tables, "attrib")
if self.displayed_only:
for table in tables:
# lxml utilizes XPATH 1.0 which does not have regex
# support. As a result, we find all elements with a style
# attribute and iterate them to check for display:none
for elem in table.xpath(".//*[@style]"):
if "display:none" in elem.attrib.get("style", "").replace(" ", ""):
elem.getparent().remove(elem)
if not tables:
raise ValueError(
"No tables found matching regex {patt!r}".format(patt=pattern)
)
return tables
def _equals_tag(self, obj, tag):
return obj.tag == tag
def _build_doc(self):
"""
Raises
------
ValueError
* If a URL that lxml cannot parse is passed.
Exception
* Any other ``Exception`` thrown. For example, trying to parse a
URL that is syntactically correct on a machine with no internet
connection will fail.
See Also
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
from lxml.html import parse, fromstring, HTMLParser
from lxml.etree import XMLSyntaxError
parser = HTMLParser(recover=True, encoding=self.encoding)
try:
if _is_url(self.io):
with urlopen(self.io) as f:
r = parse(f, parser=parser)
else:
# try to parse the input in the simplest way
r = parse(self.io, parser=parser)
try:
r = r.getroot()
except AttributeError:
pass
except (UnicodeDecodeError, IOError) as e:
# if the input is a blob of html goop
if not _is_url(self.io):
r = fromstring(self.io, parser=parser)
try:
r = r.getroot()
except AttributeError:
pass
else:
raise e
else:
if not hasattr(r, "text_content"):
raise XMLSyntaxError("no text parsed from document", 0, 0, 0)
return r
def _parse_thead_tr(self, table):
rows = []
for thead in table.xpath(".//thead"):
rows.extend(thead.xpath("./tr"))
# HACK: lxml does not clean up the clearly-erroneous
# <thead><th>foo</th><th>bar</th></thead>. (Missing <tr>). Add
# the <thead> and _pretend_ it's a <tr>; _parse_td() will find its
# children as though it's a <tr>.
#
# Better solution would be to use html5lib.
elements_at_root = thead.xpath("./td|./th")
if elements_at_root:
rows.append(thead)
return rows
def _parse_tbody_tr(self, table):
from_tbody = table.xpath(".//tbody//tr")
from_root = table.xpath("./tr")
# HTML spec: at most one of these lists has content
return from_tbody + from_root
def _parse_tfoot_tr(self, table):
return table.xpath(".//tfoot//tr")
def _expand_elements(body):
lens = Series([len(elem) for elem in body])
lens_max = lens.max()
not_max = lens[lens != lens_max]
empty = [""]
for ind, length in not_max.items():
body[ind] += empty * (lens_max - length)
def _data_to_frame(**kwargs):
head, body, foot = kwargs.pop("data")
header = kwargs.pop("header")
kwargs["skiprows"] = _get_skiprows(kwargs["skiprows"])
if head:
body = head + body
# Infer header when there is a <thead> or top <th>-only rows
if header is None:
if len(head) == 1:
header = 0
else:
# ignore all-empty-text rows
header = [i for i, row in enumerate(head) if any(text for text in row)]
if foot:
body += foot
# fill out elements of body that are "ragged"
_expand_elements(body)
tp = TextParser(body, header=header, **kwargs)
df = tp.read()
return df
_valid_parsers = {
"lxml": _LxmlFrameParser,
None: _LxmlFrameParser,
"html5lib": _BeautifulSoupHtml5LibFrameParser,
"bs4": _BeautifulSoupHtml5LibFrameParser,
}
def _parser_dispatch(flavor):
"""Choose the parser based on the input flavor.
Parameters
----------
flavor : str
The type of parser to use. This must be a valid backend.
Returns
-------
cls : _HtmlFrameParser subclass
The parser class based on the requested input flavor.
Raises
------
ValueError
* If `flavor` is not a valid backend.
ImportError
* If you do not have the requested `flavor`
"""
valid_parsers = list(_valid_parsers.keys())
if flavor not in valid_parsers:
raise ValueError(
"{invalid!r} is not a valid flavor, valid flavors "
"are {valid}".format(invalid=flavor, valid=valid_parsers)
)
if flavor in ("bs4", "html5lib"):
if not _HAS_HTML5LIB:
raise ImportError("html5lib not found, please install it")
if not _HAS_BS4:
raise ImportError("BeautifulSoup4 (bs4) not found, please install it")
# Although we call this above, we want to raise here right before use.
bs4 = import_optional_dependency("bs4") # noqa:F841
else:
if not _HAS_LXML:
raise ImportError("lxml not found, please install it")
return _valid_parsers[flavor]
def _print_as_set(s):
return "{" + "{arg}".format(arg=", ".join(pprint_thing(el) for el in s)) + "}"
def _validate_flavor(flavor):
if flavor is None:
flavor = "lxml", "bs4"
elif isinstance(flavor, str):
flavor = (flavor,)
elif isinstance(flavor, abc.Iterable):
if not all(isinstance(flav, str) for flav in flavor):
raise TypeError(
"Object of type {typ!r} is not an iterable of "
"strings".format(typ=type(flavor).__name__)
)
else:
fmt = "{flavor!r}" if isinstance(flavor, str) else "{flavor}"
fmt += " is not a valid flavor"
raise ValueError(fmt.format(flavor=flavor))
flavor = tuple(flavor)
valid_flavors = set(_valid_parsers)
flavor_set = set(flavor)
if not flavor_set & valid_flavors:
raise ValueError(
"{invalid} is not a valid set of flavors, valid "
"flavors are {valid}".format(
invalid=_print_as_set(flavor_set), valid=_print_as_set(valid_flavors)
)
)
return flavor
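# Editor's illustrative note (not from the original source):
# _validate_flavor(None) returns ('lxml', 'bs4'), _validate_flavor('bs4')
# returns ('bs4',), and any value that is not a string or an iterable of
# strings raises TypeError or ValueError as appropriate.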
def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
flavor = _validate_flavor(flavor)
compiled_match = re.compile(match) # you can pass a compiled regex here
# hack around python 3 deleting the exception variable
retained = None
for flav in flavor:
parser = _parser_dispatch(flav)
p = parser(io, compiled_match, attrs, encoding, displayed_only)
try:
tables = p.parse_tables()
except Exception as caught:
# if `io` is an io-like object, check if it's seekable
# and try to rewind it before trying the next parser
if hasattr(io, "seekable") and io.seekable():
io.seek(0)
elif hasattr(io, "seekable") and not io.seekable():
# if we couldn't rewind it, let the user know
raise ValueError(
"The flavor {} failed to parse your input. "
"Since you passed a non-rewindable file "
"object, we can't rewind it to try "
"another parser. Try read_html() with a "
"different flavor.".format(flav)
)
retained = caught
else:
break
else:
raise_with_traceback(retained)
ret = []
for table in tables:
try:
ret.append(_data_to_frame(data=table, **kwargs))
except EmptyDataError: # empty table
continue
return ret
def read_html(
io,
match=".+",
flavor=None,
header=None,
index_col=None,
skiprows=None,
attrs=None,
parse_dates=False,
thousands=",",
encoding=None,
decimal=".",
converters=None,
na_values=None,
keep_default_na=True,
displayed_only=True,
):
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str, path object or file-like object
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~read_csv` for more details.
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
        The encoding used to decode the web page. Defaults to ``None``. ``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European
data).
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
na_values : iterable, default None
Custom NA values
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
        values are overridden, otherwise they're appended to the specified
        na_values.
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
Notes
-----
Before using this function you should read the :ref:`gotchas about the
HTML parsing libraries <io.html.gotchas>`.
Expect to do some cleanup after you call this function. For example, you
might need to manually assign column names if the column names are
converted to NaN when you pass the `header=0` argument. We try to assume as
little as possible about the structure of the table and push the
idiosyncrasies of the HTML contained in the table to the user.
This function searches for ``<table>`` elements and only for ``<tr>``
and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
element in the table. ``<td>`` stands for "table data". This function
attempts to properly handle ``colspan`` and ``rowspan`` attributes.
    If the table has a ``<thead>`` element, it is used to construct
    the header, otherwise this function attempts to find the header within
    the body (by putting rows containing only ``<th>`` elements into the
    header).
.. versionadded:: 0.21.0
Similar to :func:`~read_csv` the `header` argument is applied
**after** `skiprows` is applied.
This function will *always* return a list of :class:`DataFrame` *or*
it will fail, e.g., it will *not* return an empty list.
Examples
--------
See the :ref:`read_html documentation in the IO section of the docs
<io.read_html>` for some examples of reading in HTML tables.
"""
_importers()
# Type check here. We don't want to parse only to fail because of an
# invalid value of an integer skiprows.
if isinstance(skiprows, numbers.Integral) and skiprows < 0:
raise ValueError(
"cannot skip rows starting from the end of the "
"data (you passed a negative value)"
)
_validate_header_arg(header)
return _parse(
flavor=flavor,
io=io,
match=match,
header=header,
index_col=index_col,
skiprows=skiprows,
parse_dates=parse_dates,
thousands=thousands,
attrs=attrs,
encoding=encoding,
decimal=decimal,
converters=converters,
na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only,
)
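# Editor's illustrative note (not part of the original source): a typical call
# looks like
#   dfs = read_html("https://example.com/stats.html", match="Quarterly",
#                   header=0)
# which returns a list of DataFrames, one per <table> whose text matches the
# pattern; the URL above is only a placeholder.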
| bsd-3-clause |
trentino-sistemi/l4s | web/pyjstat.py | 1 | 10028 | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
from collections import OrderedDict
def check_input(naming):
"""Check and validate input params.
Args:
naming (string): a string containing the naming type (label or id).
Returns:
Nothing
Raises:
ValueError: if the parameter is not in the allowed list.
"""
if naming not in ['label', 'id']:
raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
"""Get dimensions from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
Returns:
dimensions (list): list of pandas data frames with dimension \
category data.
dim_names (list): list of strings with dimension names.
"""
dimensions = []
dim_names = []
for dim in js_dict['dimension']['id']:
dim_name = js_dict['dimension'][dim]['label']
if not dim_name:
dim_name = dim
if naming == 'label':
dim_label = get_dim_label(js_dict, dim)
dimensions.append(dim_label)
dim_names.append(dim_name)
else:
dim_index = get_dim_index(js_dict, dim)
dimensions.append(dim_index)
dim_names.append(dim)
return dimensions, dim_names
def get_dim_label(js_dict, dim):
"""Get label from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_label(pandas.DataFrame): DataFrame with label-based dimension data.
"""
try:
dim_label = js_dict['dimension'][dim]['category']['label']
except KeyError:
dim_index = get_dim_index(js_dict, dim)
dim_label = pd.concat([dim_index['id'],
dim_index['id']],
axis=1)
dim_label.columns = ['id', 'label']
else:
dim_label = pd.DataFrame(zip(dim_label.keys(),
dim_label.values()),
index=dim_label.keys(),
columns=['id', 'label'])
return dim_label
def get_dim_index(js_dict, dim):
"""Get index from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_index (pandas.DataFrame): DataFrame with index-based dimension data.
"""
try:
dim_index = js_dict['dimension'][dim]['category']['index']
except KeyError:
dim_label = get_dim_label(js_dict, dim)
dim_index = pd.DataFrame(zip([dim_label['id'][0]], [0]),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(zip(dim_index,
range(0, len(dim_index))),
index=dim_index,
columns=['id', 'index'])
else:
dim_index = pd.DataFrame(zip(dim_index.keys(),
dim_index.values()),
index=dim_index.keys(),
columns=['id', 'index'])
return dim_index
def get_values(js_dict):
"""Get values from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
Returns:
values (list): list of dataset values.
"""
values = js_dict['value']
if type(values) is dict: # see json-stat docs
        max_val = int(max(values.keys(), key=int))
        vals = []
        # indices that are missing from the dict are implicit nulls
        for i in range(0, max_val + 1):
            if str(i) in values:
                vals.append(values[str(i)])
            else:
                vals.append(None)
        values = vals
return values
def get_df_row(dimensions, naming='label', i=0, record=None):
"""Generate row dimension values for a pandas dataframe.
Args:
dimensions (list): list of pandas dataframes with dimension labels \
generated by get_dim_label or get_dim_index methods.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
i (int): dimension list iteration index. Default is 0, it's used in the \
recursive calls to the method.
record (list): list of values representing a pandas dataframe row, \
except for the value column. Default is empty, it's used \
in the recursive calls to the method.
Yields:
list: list with pandas dataframe column values except for value column
"""
check_input(naming)
if i == 0 or record is None:
record = []
for dimension in dimensions[i][naming]:
record.append(dimension)
if len(record) == len(dimensions):
yield record
if i + 1 < len(dimensions):
for row in get_df_row(dimensions, naming, i + 1, record):
yield row
if len(record) == i + 1:
record.pop()
def uniquify(seq):
"""Return unique values in a list in the original order. See: \
http://www.peterbe.com/plog/uniqifiers-benchmark
Args:
seq (list): original list.
Returns:
list: list without duplicates preserving original order.
"""
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
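def _example_uniquify():
    """Editor's illustrative sketch, not part of the original module.
    The function name is an editorial addition; it only demonstrates that
    uniquify drops duplicates while keeping first-seen order.
    """
    assert uniquify([2, 1, 2, 3, 1]) == [2, 1, 3]
    return uniquify(['b', 'a', 'b'])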
def from_json_stat(datasets, naming='label'):
"""Decode JSON-stat format into pandas.DataFrame object
Args:
datasets(OrderedDict): data in JSON-stat format, previously deserialized
to a python object by json.load() or json.loads(),
for example.
naming(string, optional): dimension naming. Possible values: 'label'
or 'id.'
Returns:
output(list): list of pandas.DataFrame with imported data.
"""
check_input(naming)
results = []
for dataset in datasets:
js_dict = datasets[dataset]
dimensions, dim_names = get_dimensions(js_dict, naming)
values = get_values(js_dict)
output = pd.DataFrame(columns=dim_names + [unicode('value', 'utf-8')],
index=range(0, len(values)))
for i, category in enumerate(get_df_row(dimensions, naming)):
output.loc[i] = category + [values.pop(0)]
output = output.convert_objects(convert_numeric=True)
results.append(output)
return results
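# Editor's illustrative note (not part of the original module): calling
# from_json_stat(datasets, naming='id') returns the same data frames as the
# default naming='label', except that the dimension columns (and their names)
# hold category and dimension ids instead of human readable labels.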
def to_json_stat(input_df, value="value"):
"""Encode pandas.DataFrame object into JSON-stat format. The DataFrames
must have exactly one value column.
Args:
        input_df(pandas.DataFrame): pandas data frame (or list of data frames)
                                    to encode.
        value(string or list): name of the value column (or columns).
Returns:
output(string): String with JSON-stat object.
"""
data = []
if isinstance(input_df, pd.DataFrame):
data.append(input_df)
else:
data = input_df
result = []
for row, dataframe in enumerate(data):
if not type(value) is list:
value = [value]
dims = data[row].filter([item for item in data[row].columns.values if item not in value])
if len(dims.columns.values) != len(set(dims.columns.values)):
raise ValueError('Non-value columns must constitute a unique ID')
dim_names = list(dims)
categories = [{i: {"label": i, "category": {"index":
OrderedDict([(str(j), str(k)) for k, j in
enumerate(uniquify(dims[i]))]),
"label":OrderedDict([(str(k), str(j)) for k, j in
enumerate(uniquify(dims[i]))])}}}
for i in dims.columns.values]
dataset = {"dataset" + str(row + 1): {"dimension": OrderedDict(),
"value": list(
dataframe[value].where(
pd.notnull(dataframe[value]), None))}}
for category in categories:
dataset["dataset" + str(row + 1)]["dimension"].update(category)
dataset[
"dataset" + str(row + 1)]["dimension"].update({"id": dim_names})
dataset["dataset" + str(row + 1)]["dimension"].update(
{"size": [len(dims[i].unique()) for i in dims.columns.values]})
for category in categories:
dataset["dataset" + str(row + 1)]["dimension"].update(category)
result.append(dataset)
return json.dumps(result)
| agpl-3.0 |
jmmease/pandas | pandas/tests/groupby/common.py | 15 | 2100 | """ Base setup """
import pytest
import numpy as np
from pandas.util import testing as tm
from pandas import DataFrame, MultiIndex
@pytest.fixture
def mframe():
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
return DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
@pytest.fixture
def df():
return DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
class MixIn(object):
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = df()
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
self.mframe = mframe()
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
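# Editor's illustrative note (not part of the original module): groupby test
# classes typically inherit from MixIn to reuse the fixtures built in
# setup_method, e.g.
#   class TestExample(MixIn):
#       def test_mean_by_key(self):
#           result = self.df.groupby('A')['C'].mean()
#           assert len(result) == 2   # the 'foo' and 'bar' groups
# (the class above is an editorial example, not a real pandas test class).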
| bsd-3-clause |
IshankGulati/scikit-learn | sklearn/neighbors/classification.py | 27 | 14358 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights[inliers])],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
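# Editor's illustrative note (not part of the original module): setting an
# explicit outlier_label avoids the ValueError raised above for query points
# with no neighbors inside `radius`, e.g.
#   clf = RadiusNeighborsClassifier(radius=0.5, outlier_label=-1)
#   clf.fit(X_train, y_train)
#   clf.predict(X_query)   # isolated query points are labelled -1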
| bsd-3-clause |
diegocavalca/Studies | phd-thesis/nilmtk/nilmtk/stats/tests/test_locategoodsections.py | 2 | 4559 | #!/usr/bin/python
from __future__ import print_function, division
import unittest
from os.path import join
import numpy as np
import pandas as pd
from datetime import timedelta
from nilmtk.stats import GoodSections
from nilmtk.stats.goodsectionsresults import GoodSectionsResults
from nilmtk import TimeFrame, ElecMeter, DataSet
from nilmtk.datastore import HDFDataStore
from nilmtk.elecmeter import ElecMeterID
from nilmtk.tests.testingtools import data_dir
METER_ID = ElecMeterID(instance=1, building=1, dataset='REDD')
class TestLocateGoodSections(unittest.TestCase):
@classmethod
def setUpClass(cls):
filename = join(data_dir(), 'energy_complex.h5')
cls.datastore = HDFDataStore(filename)
ElecMeter.load_meter_devices(cls.datastore)
cls.meter_meta = cls.datastore.load_metadata('building1')['elec_meters'][METER_ID.instance]
@classmethod
def tearDownClass(cls):
cls.datastore.close()
def test_pipeline(self):
meter1 = ElecMeter(store=self.datastore, metadata=self.meter_meta,
meter_id=METER_ID)
# load co_test.h5
dataset = DataSet(join(data_dir(), 'co_test.h5'))
meter2 = dataset.buildings[1].elec.mains()
for meter in [meter1, meter2]:
for chunksize in [None, 2**10, 2**29]:
if chunksize is None:
kwargs = {}
else:
kwargs = {'chunksize': chunksize}
source_node = meter.get_source_node(**kwargs)
good_sections = GoodSections(source_node)
good_sections.run()
combined = good_sections.results.simple()
meter.clear_cache()
meter.good_sections(**kwargs)
meter.good_sections(**kwargs)
meter.clear_cache()
dataset.store.close()
def test_process_chunk(self):
MAX_SAMPLE_PERIOD = 10
metadata = {'device': {'max_sample_period': MAX_SAMPLE_PERIOD}}
# 0 1 2 3 4 5 6 7
secs = [0,10,20,30, 50,60, 100, 200,
# 8 9 10 11 12 13 14 15 16
250,260,270,280,290,300, 350,360,370]
index = pd.DatetimeIndex([pd.Timestamp('2011-01-01 00:00:00') +
timedelta(seconds=sec) for sec in secs])
df = pd.DataFrame(data=np.random.randn(len(index), 3), index=index,
columns=['a', 'b', 'c'])
df.timeframe = TimeFrame(index[0], index[-1])
df.look_ahead = pd.DataFrame()
locate = GoodSections()
locate.results = GoodSectionsResults(MAX_SAMPLE_PERIOD)
locate._process_chunk(df, metadata)
results = locate.results.combined()
self.assertEqual(len(results), 4)
self.assertAlmostEqual(results[0].timedelta.total_seconds(), 30)
self.assertEqual(results[1].timedelta.total_seconds(), 10)
self.assertEqual(results[2].timedelta.total_seconds(), 50)
self.assertEqual(results[3].timedelta.total_seconds(), 20)
# Now try splitting data into multiple chunks
timestamps = [
pd.Timestamp("2011-01-01 00:00:00"),
pd.Timestamp("2011-01-01 00:00:40"),
pd.Timestamp("2011-01-01 00:01:20"),
pd.Timestamp("2011-01-01 00:04:20"),
pd.Timestamp("2011-01-01 00:06:20")
]
for split_point in [[4, 6, 9, 17], [4, 10, 12, 17]]:
locate = GoodSections()
locate.results = GoodSectionsResults(MAX_SAMPLE_PERIOD)
df.results = {}
prev_i = 0
for j, i in enumerate(split_point):
cropped_df = df.iloc[prev_i:i]
cropped_df.timeframe = TimeFrame(timestamps[j],
timestamps[j+1])
try:
cropped_df.look_ahead = df.iloc[i:]
except IndexError:
cropped_df.look_ahead = pd.DataFrame()
prev_i = i
locate._process_chunk(cropped_df, metadata)
results = locate.results.combined()
self.assertEqual(len(results), 4)
self.assertAlmostEqual(results[0].timedelta.total_seconds(), 30)
self.assertEqual(results[1].timedelta.total_seconds(), 10)
self.assertEqual(results[2].timedelta.total_seconds(), 50)
self.assertEqual(results[3].timedelta.total_seconds(), 20)
if __name__ == '__main__':
unittest.main()
| cc0-1.0 |
NelisVerhoef/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 72 | 25573 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
    # Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
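# Illustrative sketch (not part of the original test suite): how warm_start is
# meant to be used to grow an existing ensemble without refitting it from
# scratch, on the same toy data as the tests above.
def _demo_warm_start_growth():
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=0)
    clf.fit(X, y)                    # fits the first 5 estimators
    clf.set_params(n_estimators=10)
    clf.fit(X, y)                    # adds 5 more, keeping the first 5
    return len(clf.estimators_)      # -> 10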
| bsd-3-clause |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/scattergl/_error_x.py | 1 | 19370 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ErrorX(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergl"
_path_str = "scattergl.error_x"
_valid_props = {
"array",
"arrayminus",
"arrayminussrc",
"arraysrc",
"color",
"copy_ystyle",
"symmetric",
"thickness",
"traceref",
"tracerefminus",
"type",
"value",
"valueminus",
"visible",
"width",
}
# array
# -----
@property
def array(self):
"""
Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
# arrayminus
# ----------
@property
def arrayminus(self):
"""
Sets the data corresponding to the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
# arrayminussrc
# -------------
@property
def arrayminussrc(self):
"""
Sets the source reference on Chart Studio Cloud for arrayminus.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
# arraysrc
# --------
@property
def arraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for array.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
# color
# -----
@property
def color(self):
"""
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# copy_ystyle
# -----------
@property
def copy_ystyle(self):
"""
The 'copy_ystyle' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["copy_ystyle"]
@copy_ystyle.setter
def copy_ystyle(self, val):
self["copy_ystyle"] = val
# symmetric
# ---------
@property
def symmetric(self):
"""
Determines whether or not the error bars have the same length
in both directions (top/bottom for vertical bars, left/right for
horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# traceref
# --------
@property
def traceref(self):
"""
The 'traceref' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
# tracerefminus
# -------------
@property
def tracerefminus(self):
"""
The 'tracerefminus' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
# type
# ----
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# value
# -----
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valueminus
# ----------
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
arrayminus.
arraysrc
Sets the source reference on Chart Studio Cloud for
array.
color
Sets the stroke color of the error bars.
copy_ystyle
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
copy_ystyle=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs
):
"""
Construct a new ErrorX object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergl.ErrorX`
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
arrayminus.
arraysrc
Sets the source reference on Chart Studio Cloud for
array.
color
Sets the stroke color of the error bars.
copy_ystyle
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorX
"""
super(ErrorX, self).__init__("error_x")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergl.ErrorX
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.ErrorX`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
_v = array if array is not None else _v
if _v is not None:
self["array"] = _v
_v = arg.pop("arrayminus", None)
_v = arrayminus if arrayminus is not None else _v
if _v is not None:
self["arrayminus"] = _v
_v = arg.pop("arrayminussrc", None)
_v = arrayminussrc if arrayminussrc is not None else _v
if _v is not None:
self["arrayminussrc"] = _v
_v = arg.pop("arraysrc", None)
_v = arraysrc if arraysrc is not None else _v
if _v is not None:
self["arraysrc"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("copy_ystyle", None)
_v = copy_ystyle if copy_ystyle is not None else _v
if _v is not None:
self["copy_ystyle"] = _v
_v = arg.pop("symmetric", None)
_v = symmetric if symmetric is not None else _v
if _v is not None:
self["symmetric"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("traceref", None)
_v = traceref if traceref is not None else _v
if _v is not None:
self["traceref"] = _v
_v = arg.pop("tracerefminus", None)
_v = tracerefminus if tracerefminus is not None else _v
if _v is not None:
self["tracerefminus"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
_v = arg.pop("valueminus", None)
_v = valueminus if valueminus is not None else _v
if _v is not None:
self["valueminus"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
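if __name__ == "__main__":
    # Minimal illustration (added for documentation; not part of the generated
    # module): error_x settings are normally supplied through the trace
    # constructor rather than by instantiating ErrorX directly.
    import plotly.graph_objects as go

    fig = go.Figure(
        go.Scattergl(
            x=[1, 2, 3],
            y=[4, 1, 7],
            error_x=dict(type="percent", value=10),
        )
    )
    fig.show()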
| mit |
themrmax/scikit-learn | sklearn/utils/tests/test_extmath.py | 5 | 22936 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.fixes import np_version
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_array_equal(mode, mode2)
assert_array_equal(score, score2)
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
@ignore_warnings # Test deprecated backport to be removed in 0.21
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
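# Illustrative sketch (not part of the original tests): the standard stable
# evaluation of logsumexp shifts by the maximum before exponentiating,
# i.e. logsumexp(x) = m + log(sum(exp(x - m))) with m = max(x).
def _stable_logsumexp_reference(x):
    m = np.max(x)
    return m + np.log(np.sum(np.exp(x - m)))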
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
assert_almost_equal(s[:rank], sa[:rank])
@ignore_warnings # extmath.norm is deprecated to be removed in 0.21
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
# Check the warning with an int array and np.dot potential overflow
assert_warns_message(
UserWarning, 'Array type is integer, np.dot may '
'overflow. Data should be float type to avoid this issue',
squared_norm, X.astype(int))
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
for dtype in (np.float32, np.float64):
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
Xcsr = sparse.csr_matrix(X, dtype=dtype)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr), precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X with structure of approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
random_state=0)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
# randomized_svd with power_iteration_normalized='none' diverges for
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer,
random_state=0)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_randomized_svd_sign_flip_with_transpose():
# Check if the randomized_svd sign flipping is always done based on u
# irrespective of transpose.
# See https://github.com/scikit-learn/scikit-learn/issues/5608
# for more details.
def max_loading_is_positive(u, v):
"""
returns bool tuple indicating if the values maximising np.abs
are positive across all rows for u and across all columns for v.
"""
u_based = (np.abs(u).max(axis=0) == u.max(axis=0)).all()
v_based = (np.abs(v).max(axis=1) == v.max(axis=1)).all()
return u_based, v_based
mat = np.arange(10 * 8).reshape(10, -1)
# Without transpose
u_flipped, _, v_flipped = randomized_svd(mat, 3, flip_sign=True)
u_based, v_based = max_loading_is_positive(u_flipped, v_flipped)
assert_true(u_based)
assert_false(v_based)
# With transpose
u_flipped_with_transpose, _, v_flipped_with_transpose = randomized_svd(
mat, 3, flip_sign=True, transpose=True)
u_based, v_based = max_loading_is_positive(
u_flipped_with_transpose, v_flipped_with_transpose)
assert_true(u_based)
assert_false(v_based)
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
def naive_log_logistic(x):
return np.log(1 / (1 + np.exp(-x)))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
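# Illustrative sketch (not part of the original tests): a branch-wise stable
# way to evaluate log(1 / (1 + exp(-x))); the claim that log_logistic uses
# exactly this form internally is an assumption, not verified here.
def _stable_log_logistic_reference(x):
    out = np.empty_like(x, dtype=np.float64)
    pos = x > 0
    out[pos] = -np.log1p(np.exp(-x[pos]))
    out[~pos] = x[~pos] - np.log1p(np.exp(x[~pos]))
    return out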
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
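# For reference (illustrative note, not part of the original tests): merging
# two chunks A and B relies on the parallel-variance identity
#   mean_AB = (n_A * mean_A + n_B * mean_B) / (n_A + n_B)
#   M2_AB   = M2_A + M2_B + (mean_B - mean_A) ** 2 * n_A * n_B / (n_A + n_B)
# where M2 is the sum of squared deviations from the mean (variance = M2 / n).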
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
# This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
# In some old version, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
def test_stable_cumsum():
if np_version < (1, 9):
raise SkipTest("Sum is as unstable as cumsum for numpy < 1.9")
assert_array_equal(stable_cumsum([1, 2, 3]), np.cumsum([1, 2, 3]))
r = np.random.RandomState(0).rand(100000)
assert_warns(RuntimeWarning, stable_cumsum, r, rtol=0, atol=0)
# test axis parameter
A = np.random.RandomState(36).randint(1000, size=(5, 5, 5))
assert_array_equal(stable_cumsum(A, axis=0), np.cumsum(A, axis=0))
assert_array_equal(stable_cumsum(A, axis=1), np.cumsum(A, axis=1))
assert_array_equal(stable_cumsum(A, axis=2), np.cumsum(A, axis=2))
| bsd-3-clause |
spallavolu/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly while enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
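For reference, the objective minimized by MultiTaskLasso (as documented in
scikit-learn) is::

    (1 / (2 * n_samples)) * ||Y - X W||_Fro^2 + alpha * ||W||_21

where ||W||_21 = sum_i sqrt(sum_j w_ij^2) couples the tasks through a mixed
l1/l2 penalty on the rows of the coefficient matrix W.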
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
FrancoisRheaultUS/dipy | doc/examples/reconst_msdki.py | 3 | 13658 | # -*- coding: utf-8 -*-
"""
==============================================
Mean signal diffusion kurtosis imaging (MSDKI)
==============================================
Several microstructural models have been proposed to increase the specificity
of diffusion-weighted data; however, improper model assumptions are known to
compromise the validity of the model's estimates [NetoHe2019]_. To avoid
misleading interpretation, it might be enough to characterize
diffusion-weighted data using signal representation techniques. For example,
assuming that the degree of non-Gaussian diffusion decreases with tissue
degeneration, this can be sensitive to general microstructural alterations.
Although this cannot be used to distinguish different mechanisms of
microstructural changes (e.g. axonal loss vs demyelination), the degree of
non-Gaussian diffusion can provide insights on the general condition of tissue
microstructure and provide useful markers to understanding, for instance, the
relationship between brain microstructure changes and alterations in behaviour
(e.g. [Price2017]_).
Diffusion Kurtosis Imaging is one of the conventional ways to estimate the
degree of non-Gaussian diffusion (see :ref:`example_reconst_dki`). However,
as previously pointed [NetoHe2015]_, standard kurtosis measures do not only
depend on microstructural properties but also on mesoscopic properties such as
fiber dispersion or the intersection angle of crossing fibers.
In the following example, we show how one can process the diffusion kurtosis
from mean signals (also known as powder-averaged signals) and obtain a
characterization of non-Gaussian diffusion independently of the degree of fiber
organization [NetoHe2018]_. In the first part of this example, the properties
of the measures obtained from the mean signal diffusion kurtosis imaging
[NetoHe2018]_ are illustrated using synthetic data. Secondly, the mean signal
diffusion kurtosis imaging will be applied to in-vivo MRI data.
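As a brief orientation (a sketch of the signal representation used throughout
this example, following [NetoHe2018]_), MSDKI fits the direction-averaged
signal $\bar{S}(b)$ with a DKI-like expansion,
$\log \bar{S}(b) = \log \bar{S}_0 - b\,MSD + \frac{1}{6}b^{2}\,MSD^{2}\,MSK$,
where MSD and MSK are the mean signal diffusivity and mean signal kurtosis
estimated below.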
Let's import all relevant modules:
"""
import numpy as np
import matplotlib.pyplot as plt
# Reconstruction modules
import dipy.reconst.dki as dki
import dipy.reconst.msdki as msdki
# For simulations
from dipy.sims.voxel import multi_tensor
from dipy.core.gradients import gradient_table
from dipy.core.sphere import disperse_charges, HemiSphere
# For in-vivo data
from dipy.data import get_fnames
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti
from dipy.segment.mask import median_otsu
"""
Testing MSDKI in synthetic data
===============================
We simulate representative diffusion-weighted signals using MultiTensor
simulations (for more information on this type of simulations see
:ref:`example_simulate_multi_tensor`). For this example, simulations are
produced based on the sum of four diffusion tensors to represent the intra-
and extra-cellular spaces of two fiber populations. The parameters of these
tensors are adjusted according to [NetoHe2015]_ (see also
:ref:`example_simulate_dki`).
"""
mevals = np.array([[0.00099, 0, 0],
[0.00226, 0.00087, 0.00087],
[0.00099, 0, 0],
[0.00226, 0.00087, 0.00087]])
"""
For the acquisition parameters of the synthetic data, we use 60 gradient
directions for two non-zero b-values (1000 and 2000 $s/mm^{2}$) and two
zero b-values (note that, as with standard DKI, MSDKI requires at least
three different b-values).
"""
# Sample the spherical coordinates of 60 random diffusion-weighted directions.
n_pts = 60
theta = np.pi * np.random.rand(n_pts)
phi = 2 * np.pi * np.random.rand(n_pts)
# Convert direction to cartesian coordinates.
hsph_initial = HemiSphere(theta=theta, phi=phi)
# Evenly distribute the 60 directions
hsph_updated, potential = disperse_charges(hsph_initial, 5000)
directions = hsph_updated.vertices
# Reconstruct acquisition parameters for 2 non-zero=b-values and 2 b0s
bvals = np.hstack((np.zeros(2), 1000 * np.ones(n_pts), 2000 * np.ones(n_pts)))
bvecs = np.vstack((np.zeros((2, 3)), directions, directions))
gtab = gradient_table(bvals, bvecs)
"""
Simulations are looped for different intra- and extra-cellular water
volume fractions and different intersection angles of the two-fiber
populations.
"""
# Array containing the intra-cellular volume fractions tested
f = np.linspace(20, 80.0, num=7)
# Array containing the intersection angle
ang = np.linspace(0, 90.0, num=91)
# Matrix where synthetic signals will be stored
dwi = np.empty((f.size, ang.size, bvals.size))
for f_i in range(f.size):
# estimating volume fractions for individual tensors
fractions = np.array([100 - f[f_i], f[f_i], 100 - f[f_i], f[f_i]]) * 0.5
for a_i in range(ang.size):
# defining the directions for individual tensors
angles = [(ang[a_i], 0.0), (ang[a_i], 0.0), (0.0, 0.0), (0.0, 0.0)]
# producing signals using Dipy's function multi_tensor
signal, sticks = multi_tensor(gtab, mevals, S0=100, angles=angles,
fractions=fractions, snr=None)
dwi[f_i, a_i, :] = signal
"""
Now that all synthetic signals were produced, we can go forward with
MSDKI fitting. As with other DIPY reconstruction techniques, the MSDKI model has
to be first defined for the specific GradientTable object of the synthetic
data. For MSDKI, this is done by instantiating the MeanDiffusionKurtosisModel
object in the following way:
"""
msdki_model = msdki.MeanDiffusionKurtosisModel(gtab)
"""
MSDKI can then be fitted to the synthetic data by calling the ``fit`` function
of this object:
"""
msdki_fit = msdki_model.fit(dwi)
"""
From the above fit object we can extract the two main parameters of the MSDKI,
i.e.: 1) the mean signal diffusion (MSD); and 2) the mean signal kurtosis (MSK)
"""
MSD = msdki_fit.msd
MSK = msdki_fit.msk
""" For a reference, we also calculate the mean diffusivity (MD) and mean
kurtosis (MK) from the standard DKI.
"""
dki_model = dki.DiffusionKurtosisModel(gtab)
dki_fit = dki_model.fit(dwi)
MD = dki_fit.md
MK = dki_fit.mk(0, 3)
"""
Now we plot the results as a function of the ground truth intersection
angle and for different volume fractions of intra-cellular water.
"""
fig1, axs = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
for f_i in range(f.size):
axs[0, 0].plot(ang, MSD[f_i], linewidth=1.0,
label='$F: %.2f$' % f[f_i])
axs[0, 1].plot(ang, MSK[f_i], linewidth=1.0,
label='$F: %.2f$' % f[f_i])
axs[1, 0].plot(ang, MD[f_i], linewidth=1.0,
label='$F: %.2f$' % f[f_i])
axs[1, 1].plot(ang, MK[f_i], linewidth=1.0,
label='$F: %.2f$' % f[f_i])
# Adjust the properties of the panels of the figure
axs[0, 0].set_xlabel('Intersection angle')
axs[0, 0].set_ylabel('MSD')
axs[0, 1].set_xlabel('Intersection angle')
axs[0, 1].set_ylabel('MSK')
axs[0, 1].legend(loc='center left', bbox_to_anchor=(1, 0.5))
axs[1, 0].set_xlabel('Intersection angle')
axs[1, 0].set_ylabel('MD')
axs[1, 1].set_xlabel('Intersection angle')
axs[1, 1].set_ylabel('MK')
axs[1, 1].legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
fig1.savefig('MSDKI_simulations.png')
"""
.. figure:: MSDKI_simulations.png
:align: center
MSDKI and DKI measures for data of two crossing synthetic fibers.
Upper panels show the MSDKI measures: 1) mean signal diffusivity (left
panel); and 2) mean signal kurtosis (right panel).
For reference, lower panels show the measures obtained by standard DKI:
1) mean diffusivity (left panel); and 2) mean kurtosis (right panel).
All estimates are plotted as a function of the intersecting angle of the
two crossing fibers. Different curves correspond to different ground truth
axonal volume fraction of intra-cellular space.
The results of the above figure demonstrate that both MSD and MSK are
sensitive to axonal volume fraction (i.e. a microstructural property) but are
independent of the intersection angle of the two crossing fibers (i.e.
independent of properties regarding fiber orientation). In contrast, DKI
measures are sensitive to both axonal volume fraction and
intersection angle.
"""
"""
Reconstructing diffusion data using MSDKI
=========================================
Now that the properties of MSDKI were illustrated, let's apply MSDKI to in-vivo
diffusion-weighted data. As in the example for the standard DKI
(see :ref:`example_reconst_dki`), we use fetch to download a multi-shell
dataset which was kindly provided by Hansen and Jespersen (more details about
the data are provided in their paper [Hansen2016]_). The total size of the
downloaded data is 192 MBytes, however you only need to fetch it once.
"""
fraw, fbval, fbvec, t1_fname = get_fnames('cfin_multib')
data, affine = load_nifti(fraw)
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
gtab = gradient_table(bvals, bvecs)
"""
Before fitting the data, we perform some data pre-processing. For illustration,
we only mask the data to avoid unnecessary calculations on the background of
the image; however, you could also apply other pre-processing techniques.
For example, some state of the art denoising algorithms are available in DIPY_
(e.g. the non-local means filter :ref:`example-denoise-nlmeans` or the
local pca :ref:`example-denoise-localpca`).
"""
maskdata, mask = median_otsu(data, vol_idx=[0, 1], median_radius=4, numpass=2,
autocrop=False, dilate=1)
"""
Now that we have loaded and pre-processed the data we can go forward
with MSDKI fitting. As for the synthetic data, the MSDKI model has to be first
defined for the data's GradientTable object:
"""
msdki_model = msdki.MeanDiffusionKurtosisModel(gtab)
"""
The data can then be fitted by calling the ``fit`` function of this object:
"""
msdki_fit = msdki_model.fit(data, mask=mask)
"""
Let's then extract the two main MSDKI's parameters: 1) mean signal diffusion
(MSD); and 2) mean signal kurtosis (MSK).
"""
MSD = msdki_fit.msd
MSK = msdki_fit.msk
"""
For comparison, we calculate also the mean diffusivity (MD) and mean kurtosis
(MK) from the standard DKI.
"""
dki_model = dki.DiffusionKurtosisModel(gtab)
dki_fit = dki_model.fit(data, mask=mask)
MD = dki_fit.md
MK = dki_fit.mk(0, 3)
"""
Let's now visualize the data using matplotlib for a selected axial slice.
"""
axial_slice = 9
fig2, ax = plt.subplots(2, 2, figsize=(6, 6),
subplot_kw={'xticks': [], 'yticks': []})
fig2.subplots_adjust(hspace=0.3, wspace=0.05)
ax.flat[0].imshow(MSD[:, :, axial_slice].T, cmap='gray', vmin=0, vmax=2.0e-3,
origin='lower')
ax.flat[0].set_title('MSD (MSDKI)')
ax.flat[1].imshow(MSK[:, :, axial_slice].T, cmap='gray', vmin=0, vmax=2,
origin='lower')
ax.flat[1].set_title('MSK (MSDKI)')
ax.flat[2].imshow(MD[:, :, axial_slice].T, cmap='gray', vmin=0, vmax=2.0e-3,
origin='lower')
ax.flat[2].set_title('MD (DKI)')
ax.flat[3].imshow(MK[:, :, axial_slice].T, cmap='gray', vmin=0, vmax=2,
origin='lower')
ax.flat[3].set_title('MK (DKI)')
plt.show()
fig2.savefig('MSDKI_invivo.png')
"""
.. figure:: MSDKI_invivo.png
:align: center
MSDKI measures (upper panels) and DKI standard measures (lower panels).
This figure shows that the contrast of the in-vivo MSD and MSK maps (upper
panels) is similar to the contrast of the MD and MK maps (lower panels);
however, in the upper panels we ensure that direct contributions of fiber
dispersion were removed. The upper panels also reveal that MSDKI measures are
less sensitive to noise artefacts than standard DKI measures (as pointed out
by [NetoHe2018]_); in particular, one can appreciate that MSK maps always
present positive values in brain white matter regions, while implausible
negative kurtosis values are present in the MK maps in the same regions.
References
----------
.. [NetoHe2019] Neto Henriques R, Jespersen SN, Shemesh N (2019). Microscopic
anisotropy misestimation in spherical‐mean single diffusion
encoding MRI. Magnetic Resonance in Medicine (In press).
doi: 10.1002/mrm.27606
.. [Price2017] Price D, Tyler LK, Neto Henriques R, Campbell KR, Williams N,
Treder M, Taylor J, Cam-CAN, Henson R (2017). Age-Related
Delay in Visual and Auditory Evoked Responses is Mediated by
White- and Gray-matter Differences. Nature Communications 8,
15671. doi: 10.1038/ncomms15671.
.. [Jensen2005] Jensen JH, Helpern JA, Ramani A, Lu H, Kaczynski K (2005).
Diffusional Kurtosis Imaging: The Quantification of
Non_Gaussian Water Diffusion by Means of Magnetic Resonance
Imaging. Magnetic Resonance in Medicine 53: 1432-1440
.. [NetoHe2015] Neto Henriques R, Correia MM, Nunes RG, Ferreira HA (2015).
Exploring the 3D geometry of the diffusion kurtosis tensor -
Impact on the development of robust tractography procedures and
novel biomarkers, NeuroImage 111: 85-99
.. [NetoHe2018] Henriques RN, 2018. Advanced Methods for Diffusion MRI Data
Analysis and their Application to the Healthy Ageing Brain
(Doctoral thesis). Downing College, University of Cambridge.
https://doi.org/10.17863/CAM.29356
.. [Hansen2016] Hansen, B, Jespersen, SN (2016). Data for evaluation of fast
kurtosis strategies, b-value optimization and exploration of
diffusion MRI contrast. Scientific Data 3: 160072
doi:10.1038/sdata.2016.72
.. include:: ../links_names.inc
"""
| bsd-3-clause |
OshynSong/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 69 | 8605 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
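# Small random data matrix shared by all of the dictionary-learning tests below.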
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
ElDeveloper/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
vermouthmjl/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
UManPychron/pychron | pychron/mv/zoom/zoom.py | 2 | 6536 | # ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
import math
import os
from scipy import stats
from skimage import feature
from skimage.transform._hough_transform import probabilistic_hough_line
# ============= standard library imports ========================
from PIL import Image
from numpy import array, linspace, \
polyval, polyfit
import matplotlib.pyplot as plt
# ============= local library imports ==========================
def calc_rotation(x1, y1, x2, y2):
rise = y2 - y1
run = x2 - x1
return math.degrees(math.atan2(rise, run))
def calculate_spacing(p):
# with open(p) as fp:
im = Image.open(p).convert('L')
w, h = im.size
# im = im.crop((50, 50, w - 50, h - 50))
pad = 40
im = im.crop((pad, pad, w - pad, h - pad))
im = array(im)
# edges1 = feature.canny(im)
# edges2 = feature.canny(im, sigma=3)
edges = feature.canny(im, sigma=1)
lines = probabilistic_hough_line(edges)
# plot(im, edges, lines)
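    # The Hough transform returns straight line segments; the loop below keeps
    # only the exactly vertical (rotation == -90 deg) and exactly horizontal
    # (rotation == 0 deg) segments and collects their x and y end-point
    # coordinates, which trace the calibration grid lines.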
xs = []
ys = []
for a, b in lines:
x1, y1 = a
x2, y2 = b
rot = calc_rotation(x1, y1, x2, y2)
if rot == -90.0:
# ax2.plot((x1, x2), (y1, y2))
# ax3.plot((x1, x2), (y1, y2))
xs.append(x1)
xs.append(x2)
elif rot == 0.0:
ys.append(y1)
ys.append(y2)
xs = array(sorted(xs))
ys = array(sorted(ys))
# print xs
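    # For each sorted coordinate, record the distance to the first other
    # coordinate more than 5 px away; smaller gaps are treated as duplicate
    # detections of the same grid line.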
ds = []
for xx in (xs, ys):
for xi in xx:
for yi in xx:
di = yi - xi
if di > 5:
ds.append(di)
break
# print ds
dists = array(ds)
# while dists.std() > 1:
# md = dists.max()
# dists = dists[where(dists < md)]
mr = stats.mode(dists)
# print mr
# print dists
return mr.mode[0], mr.count[0], dists.mean(), dists.std()
def plot(im, edges, lines):
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3,
figsize=(8,
3),
sharex=True, sharey=True)
ax1.imshow(im, cmap=plt.cm.jet)
ax1.axis('off')
ax2.imshow(edges, cmap=plt.cm.gray)
ax2.axis('off')
for a, b in lines:
x1, y1 = a
x2, y2 = b
rot = calc_rotation(x1, y1, x2, y2)
if rot == -90.0:
ax2.plot((x1, x2), (y1, y2))
ax3.plot((x1, x2), (y1, y2))
fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
# plt.show()
def calculate_spacings():
ps = [
# ('/Users/ross/Sandbox/zoom_cal/snapshot-008.jpg', 0),
# ('/Users/ross/Sandbox/zoom_cal/snapshot-013.jpg', 0),
('/Users/ross/Sandbox/zoom_cal/snapshot-007.jpg', 25, 24.958),
('/Users/ross/Sandbox/zoom_cal/snapshot-012.jpg', 25, 24.958),
('/Users/ross/Sandbox/zoom_cal/snapshot-014.jpg', 25, 24.965),
('/Users/ross/Sandbox/zoom_cal/snapshot-006.jpg', 50, 49.997),
('/Users/ross/Sandbox/zoom_cal/snapshot-011.jpg', 50, 49.993),
('/Users/ross/Sandbox/zoom_cal/snapshot-002.jpg', 50, 49.916),
('/Users/ross/Sandbox/zoom_cal/snapshot-015.jpg', 50, 49.909),
('/Users/ross/Sandbox/zoom_cal/snapshot-005.jpg', 75, 74.986),
('/Users/ross/Sandbox/zoom_cal/snapshot-003.jpg', 75, 74.941),
('/Users/ross/Sandbox/zoom_cal/snapshot-016.jpg', 75, 74.937),
('/Users/ross/Sandbox/zoom_cal/snapshot-010.jpg', 75, 74.979),
('/Users/ross/Sandbox/zoom_cal/snapshot-009.jpg', 100, 99.955),
('/Users/ross/Sandbox/zoom_cal/snapshot-004.jpg', 100, 99.969),
('/Users/ross/Sandbox/zoom_cal/snapshot-017.jpg', 100, 99.969),
]
print('Path |Z |Mode |Cnt |Avg |STD')
zns = [0]
zas = [0]
px = [23]
for pp, zn, za in ps:
# if z!=100:
# continue
bp, _ = os.path.splitext(os.path.basename(pp))
m, c, a, s = calculate_spacing(pp)
a = '{:0.3f}'.format(a)
s = '{:0.3f}'.format(s)
print('{}|{:<4s}|{:<5s}|{:<4s}|{:<6s}|{}'.format(bp, str(zn),
str(m), str(c), a, s))
zns.append(zn)
zas.append(za)
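        # Convert the modal pixel spacing to pixels-per-mm; the 0.25 mm divisor
        # below is assumed to be the pitch of the calibration grid target.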
pxpermm = m / 0.25
px.append(pxpermm)
return zns, zas, px
if __name__ == '__main__':
zns, zas, px = calculate_spacings()
# print zns
# print zas
# print px
# zns = [0, 25, 25, 25, 50, 50, 50, 75, 75, 75, 75, 100, 100, 100]
# zas = [0, 24.958, 24.958, 24.965, 49.997, 49.993, 49.909, 74.986,
# 74.941, 74.937, 74.979, 99.955, 99.969, 99.969]
# px = [23, 28.0, 28.0, 28.0, 48.0, 48.0, 48.0, 84.0, 84.0, 84.0, 84.0,
# 128.0, 128.0, 128.0]
# plt.plot(zns, px, '+')
plt.plot(zas, px, '+')
xs = linspace(0, 100)
# plt.plot(xs, polyval(polyfit(zns, px, 4), xs))
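    # Fit a 4th-order polynomial mapping the achieved zoom value to the
    # measured pixels-per-mm calibration and overlay the fit on the data points.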
coeffs = polyfit(zas, px, 4)
print(coeffs)
plt.plot(xs, polyval(coeffs, xs))
plt.xlabel('Z')
plt.ylabel('pxpermm')
plt.show()
# print os.path.basename(pp)
# print z, calculate_spacing(pp)
# fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3,
# figsize=(8,
# 3),
# sharex=True, sharey=True)
#
# ax1.imshow(im, cmap=plt.cm.jet)
# ax1.axis('off')
#
# ax2.imshow(edges, cmap=plt.cm.gray)
# ax2.axis('off')
#
# fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
# bottom=0.02, left=0.02, right=0.98)
#
# plt.show()
# ============= EOF =============================================
| apache-2.0 |
jmetzen/scikit-learn | sklearn/model_selection/tests/test_search.py | 20 | 30855 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
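# Tiny toy dataset shared by most of the grid/randomized search tests below.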
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
# Check if ValueError (when labels is None) propagates to GridSearchCV
# And also check if labels is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
labels = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, labels)
non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_label_cvs:
print(cv)
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
dilawar/moose-full | moose-examples/snippets/diffSpinyNeuron.py | 2 | 10724 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2015 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import pylab
import numpy
import matplotlib.pyplot as plt
import moose
diffConst = 1e-11
chemdt = 0.001 # Tested various dts, this is reasonable.
diffdt = 0.001
plotdt = 0.01
animationdt = 0.01
runtime = 1
useGssa = False
def makeModel():
model = moose.Neutral( '/model' )
# Make neuronal model. It has no channels, just for geometry
cell = moose.loadModel( './spinyNeuron.p', '/model/cell', 'Neutral' )
# We don't want the cell to do any calculations. Disable everything.
for i in moose.wildcardFind( '/model/cell/##' ):
i.tick = -1
# create container for model
model = moose.element( '/model' )
chem = moose.Neutral( '/model/chem' )
    # The naming of the compartments is dictated by the places that the
# chem model expects to be loaded.
compt0 = moose.NeuroMesh( '/model/chem/compt0' )
compt0.separateSpines = 1
compt0.geometryPolicy = 'cylinder'
compt1 = moose.SpineMesh( '/model/chem/compt1' )
moose.connect( compt0, 'spineListOut', compt1, 'spineList', 'OneToOne' )
compt2 = moose.PsdMesh( '/model/chem/compt2' )
moose.connect( compt0, 'psdListOut', compt2, 'psdList', 'OneToOne' )
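    # compt0 holds the dendrite voxels, compt1 the spine heads and compt2 the
    # PSDs; the two connect calls above pass the spine and PSD geometry lists
    # from the NeuroMesh to the corresponding meshes.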
#reacSystem = moose.loadModel( 'simpleOsc.g', '/model/chem', 'ee' )
makeChemModel( compt0, True ) # Populate all 3 compts with the chem system.
makeChemModel( compt1, False )
makeChemModel( compt2, True )
compt0.diffLength = 2e-6 # This will be over 100 compartments.
# This is the magic command that configures the diffusion compartments.
compt0.subTreePath = cell.path + "/#"
moose.showfields( compt0 )
    # Build the solvers: Ksolve/Gsolve for the reactions and Dsolve for diffusion in each compartment.
ksolve0 = moose.Ksolve( '/model/chem/compt0/ksolve' )
if useGssa:
ksolve1 = moose.Gsolve( '/model/chem/compt1/ksolve' )
ksolve2 = moose.Gsolve( '/model/chem/compt2/ksolve' )
else:
ksolve1 = moose.Ksolve( '/model/chem/compt1/ksolve' )
ksolve2 = moose.Ksolve( '/model/chem/compt2/ksolve' )
dsolve0 = moose.Dsolve( '/model/chem/compt0/dsolve' )
dsolve1 = moose.Dsolve( '/model/chem/compt1/dsolve' )
dsolve2 = moose.Dsolve( '/model/chem/compt2/dsolve' )
stoich0 = moose.Stoich( '/model/chem/compt0/stoich' )
stoich1 = moose.Stoich( '/model/chem/compt1/stoich' )
stoich2 = moose.Stoich( '/model/chem/compt2/stoich' )
# Configure solvers
stoich0.compartment = compt0
stoich1.compartment = compt1
stoich2.compartment = compt2
stoich0.ksolve = ksolve0
stoich1.ksolve = ksolve1
stoich2.ksolve = ksolve2
stoich0.dsolve = dsolve0
stoich1.dsolve = dsolve1
stoich2.dsolve = dsolve2
stoich0.path = '/model/chem/compt0/#'
stoich1.path = '/model/chem/compt1/#'
stoich2.path = '/model/chem/compt2/#'
assert( stoich0.numVarPools == 1 )
assert( stoich0.numProxyPools == 0 )
assert( stoich0.numRates == 1 )
assert( stoich1.numVarPools == 1 )
assert( stoich1.numProxyPools == 0 )
if useGssa:
assert( stoich1.numRates == 2 )
assert( stoich2.numRates == 2 )
else:
assert( stoich1.numRates == 1 )
assert( stoich2.numRates == 1 )
assert( stoich2.numVarPools == 1 )
assert( stoich2.numProxyPools == 0 )
dsolve0.buildNeuroMeshJunctions( dsolve1, dsolve2 )
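    # Set up the cross-compartment reaction terms between the stoichiometry
    # solvers (dend<->spine and spine<->PSD), then remove any cross-reaction
    # entries that are not actually used.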
stoich0.buildXreacs( stoich1 )
stoich1.buildXreacs( stoich2 )
stoich0.filterXreacs()
stoich1.filterXreacs()
stoich2.filterXreacs()
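    # Apply the Ca stimulus pattern: raise the Ca_input concentration in every
    # third dendrite voxel and in every second PSD voxel.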
Ca_input_dend = moose.vec( '/model/chem/compt0/Ca_input' )
print len( Ca_input_dend )
for i in range( 60 ):
Ca_input_dend[ 3 + i * 3 ].conc = 2.0
Ca_input_PSD = moose.vec( '/model/chem/compt2/Ca_input' )
print len( Ca_input_PSD )
for i in range( 5 ):
Ca_input_PSD[ 2 + i * 2].conc = 1.0
# Create the output tables
num = compt0.numDiffCompts - 1
graphs = moose.Neutral( '/model/graphs' )
makeTab( 'Ca_soma', '/model/chem/compt0/Ca[0]' )
makeTab( 'Ca_d1', '/model/chem/compt0/Ca[1]' )
makeTab( 'Ca_d2', '/model/chem/compt0/Ca[2]' )
makeTab( 'Ca_d3', '/model/chem/compt0/Ca[3]' )
makeTab( 'Ca_s3', '/model/chem/compt1/Ca[3]' )
makeTab( 'Ca_s4', '/model/chem/compt1/Ca[4]' )
makeTab( 'Ca_s5', '/model/chem/compt1/Ca[5]' )
makeTab( 'Ca_p3', '/model/chem/compt2/Ca[3]' )
makeTab( 'Ca_p4', '/model/chem/compt2/Ca[4]' )
makeTab( 'Ca_p5', '/model/chem/compt2/Ca[5]' )
def makeTab( plotname, molpath ):
tab = moose.Table2( '/model/graphs/' + plotname ) # Make output table
# connect up the tables
moose.connect( tab, 'requestOut', moose.element( molpath ), 'getConc' );
def makeDisplay():
plt.ion()
fig = plt.figure( figsize=(10,12) )
dend = fig.add_subplot( 411 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Dend voxel #' )
plt.legend()
timeLabel = plt.text(200, 0.5, 'time = 0')
spine = fig.add_subplot( 412 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Spine voxel #' )
plt.legend()
psd = fig.add_subplot( 413 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'PSD voxel #' )
plt.legend()
timeSeries = fig.add_subplot( 414 )
timeSeries.set_ylim( 0, 2 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'time (seconds)' )
plt.legend()
Ca = moose.vec( '/model/chem/compt0/Ca' )
Ca_input = moose.vec( '/model/chem/compt0/Ca_input' )
line1, = dend.plot( range( len( Ca ) ), Ca.conc, label='Ca' )
line2, = dend.plot( range( len( Ca_input ) ), Ca_input.conc, label='Ca_input' )
dend.set_ylim( 0, 2 )
Ca = moose.vec( '/model/chem/compt1/Ca' )
line3, = spine.plot( range( len( Ca ) ), Ca.conc, label='Ca' )
spine.set_ylim( 0, 1 )
Ca = moose.vec( '/model/chem/compt2/Ca' )
Ca_input = moose.vec( '/model/chem/compt2/Ca_input' )
line4, = psd.plot( range( len( Ca ) ), Ca.conc, label='Ca' )
line5, = psd.plot( range( len( Ca_input ) ), Ca_input.conc, label='Ca_input' )
psd.set_ylim( 0, 1 )
fig.canvas.draw()
return ( timeSeries, dend, spine, psd, fig, line1, line2, line3, line4, line5, timeLabel )
def updateDisplay( plotlist ):
Ca = moose.vec( '/model/chem/compt0/Ca' )
Ca_input = moose.vec( '/model/chem/compt0/Ca_input' )
plotlist[5].set_ydata( Ca.conc )
plotlist[6].set_ydata( Ca_input.conc )
Ca = moose.vec( '/model/chem/compt1/Ca' )
plotlist[7].set_ydata( Ca.conc )
Ca = moose.vec( '/model/chem/compt2/Ca' )
Ca_input = moose.vec( '/model/chem/compt2/Ca_input' )
plotlist[8].set_ydata( Ca.conc )
plotlist[9].set_ydata( Ca_input.conc )
plotlist[4].canvas.draw()
def finalizeDisplay( plotlist, cPlotDt ):
for x in moose.wildcardFind( '/model/graphs/#[ISA=Table2]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
line1, = plotlist[0].plot( pos, x.vector, label=x.name )
plotlist[4].canvas.draw()
print( "Hit 'enter' to exit" )
raw_input()
def makeChemModel( compt, doInput ):
"""
    This function sets up a simple chemical system in which Ca input
comes to the dend and to selected PSDs. There is diffusion between
PSD and spine head, and between dend and spine head.
Ca_input ------> Ca // in dend and spine head only.
"""
# create molecules and reactions
Ca = moose.Pool( compt.path + '/Ca' )
Ca.concInit = 0.08*1e-3
Ca.diffConst = diffConst
if doInput:
Ca_input = moose.BufPool( compt.path + '/Ca_input' )
Ca_input.concInit = 0.08*1e-3
Ca_input.diffConst = diffConst
rInput = moose.Reac( compt.path + '/rInput' )
moose.connect( rInput, 'sub', Ca, 'reac' )
moose.connect( rInput, 'prd', Ca_input, 'reac' )
rInput.Kf = 100 # 1/sec
rInput.Kb = 100 # 1/sec
else:
Ca_sink = moose.BufPool( compt.path + '/Ca_sink' )
Ca_sink.concInit = 0.08*1e-3
rSink = moose.Reac( compt.path + '/rSink' )
moose.connect( rSink, 'sub', Ca, 'reac' )
moose.connect( rSink, 'prd', Ca_sink, 'reac' )
rSink.Kf = 10 # 1/sec
rSink.Kb = 10 # 1/sec
def main():
"""
This example illustrates and tests diffusion embedded in
the branching pseudo-1-dimensional geometry of a neuron.
An input pattern of Ca stimulus is applied in a periodic manner both
on the dendrite and on the PSDs of the 13 spines. The Ca levels in
each of the dend, the spine head, and the spine PSD are monitored.
Since the same molecule name is used for Ca in the three compartments,
these are automagially connected up for diffusion. The simulation
shows the outcome of this diffusion.
This example uses an external electrical model file with basal
dendrite and three branches on
the apical dendrite. One of those branches has the 13 spines.
The model is set up to run using the Ksolve for integration and the
Dsolve for handling diffusion.
The timesteps here are not the defaults. It turns out that the
chem reactions and diffusion in this example are sufficiently fast
that the chemDt has to be smaller than default. Note that this example
uses rates quite close to those used in production models.
The display has four parts:
a. animated line plot of concentration against main compartment#.
b. animated line plot of concentration against spine compartment#.
c. animated line plot of concentration against psd compartment#.
d. time-series plot that appears after the simulation has
ended.
"""
makeModel()
plotlist = makeDisplay()
# Schedule the whole lot - autoscheduling already does this.
for i in range( 11, 17 ):
moose.setClock( i, chemdt ) # for the chem objects
moose.setClock( 10, diffdt ) # for the diffusion
moose.setClock( 18, plotdt ) # for the output tables.
moose.reinit()
for i in numpy.arange( 0, runtime, animationdt ):
moose.start( animationdt )
plotlist[10].set_text( "time = %d" % i )
updateDisplay( plotlist )
finalizeDisplay( plotlist, plotdt )
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| gpl-2.0 |
mr-perry/SHARAD_Coverage | code/SHARAD_Coverage_func.py | 1 | 64354 | #!/usr/bin/env python3.4
#
# Import necessary libraries, module, etc
#
import sys, os, socket, argparse, time
import shapefile
import MySQLdb as SQL# MySQL Interface Library
import _mysql_exceptions
import numpy as np
import scipy.interpolate
import scipy.ndimage
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from scipy.io.idl import readsav # Needed to read IDL MOLA save files
from mpl_toolkits.basemap import Basemap
from datetime import datetime as dt
from copy import copy
#
# MATT, THIS IS YOUR 'FROM SCRATCH' VERSION OF SHARAD_COVERAGE
# DISCREPANCIES EXIST BETWEEN THE IDL AND PYTHON VERSIONS AND I NEED
# TO FIX THEM. SHARAD_COVERAGE.PY WAS THE ORIGINAL WORKING VERSION,
# SHARAD_COVERAGE2.PY WAS USED TO DEBUG SIZE DISCREPANCIES
#
###########################################################################
# FUNCTIONS
#
# PARSE ARGS IS TAKEN DIRECTLY FROM SHARAD_COVERAGE2.PY
#
def parseargs(prog, vers, shresc, shres):
#
# Parse the Command Line arguments
#
#
# Set the default values
#
orng = np.array([829,100000])
srng = np.array([0., -88., 360., 88.])
orbitids = ''
orbittrig = False
roll = 99.
rcri = '<'
nomap = 0
foldmap = 0
elevmap = 0
replot = 0
update = 0
clat = 0
cset = 0
shresc = 3.0
missions = ['PSP', 'ESP', 'EM1', 'EM2', 'EM3', 'EM4', 'EM5']
shresi = 0.750
#
# Initiate parser
#
parser = argparse.ArgumentParser(description=str(prog + ' ' + str(vers)))
#
# Ordered Arguments
#
parser.add_argument('mapname', type=str, nargs=1, default='SHARAD_coverage',
help = str("Desired base filename for map: {SHARAD_coverage}"))
#
# Optional Arguments
#
parser.add_argument('-o', '--orbitrange', nargs=2, default=orng, type=float,
help="Desired range of orbits to include")
parser.add_argument('-r','--rollabove', nargs=1, type=float,
help="Select data with roll at or above angle, degrees")
parser.add_argument('-n', '--rollbelow', nargs=1, type=float,
help="Select data with roll below angle, degrees")
parser.add_argument('-w', '--trackwidth', nargs=1, default=shresc, type=float,
help="Effective track width in km")
parser.add_argument('-p', '--pixelsperdegree', nargs=1, default=shres, type=float,
help="Mapping resolution in pix/deg")
parser.add_argument('-m', '--mapping', nargs=4, default=srng, type=float,
help="Geographic mapping limits in degrees")
parser.add_argument('-s', '--selection', nargs=4, default=[999, 999, 999, 999], type=float,
help="Geographic selection limits in degrees")
parser.add_argument('-c', '--centerlat', nargs=1, type=int,
help="Center latitude for projection")
parser.add_argument('-P', '--missionphase', nargs=1, type=str,
help="Map coverage for specific science phase")
parser.add_argument('-C', '--cumulative', action="store_true", default=False,
help="Map science phase maps cumulative")
parser.add_argument('-e', '--elevation', action="store_true", default=False,
help="Map coverage in elevation rather than over elevation")
parser.add_argument('-O', '--overlay', action="store_true", default=False,
help="Overlay colored MOLA elevation over b/w MOLA map")
parser.add_argument('-f', '--foldmap', action="store_true", default=False,
help="Map coverage in fold rather than over elevation")
parser.add_argument('-d', '--savedataonly', action="store_true", default=False,
help="Retain ground-track data and skip mapping")
parser.add_argument('-R', '--restore', nargs=1, type=str,
help="Restore previously saved map file and recreate plot")
parser.add_argument('-U', '--update', nargs=1, type=str,
help="Update previous version of map (requires appropriate .sav file)")
parser.add_argument('-T', '--targetbox', nargs=1, type=int, default=0,
help="Select target box")
parser.add_argument('-L', '--landingsite', nargs=1, type=str, default='',
help="Select Landing Sites: CLH, JEZ, NES, ARA, MAW, OXA")
parser.add_argument('-I', '--orbitfile', nargs=1, type=str, default='',
help="Import Orbit IDs to map")
parser.add_argument('-D', '--backgrounddata', nargs=1, type=str, default='MOLA',
help="Select the background data to plot: MOLA, TES (not available), MOLAHRSC (not available)")
if len(sys.argv[1:]) == 0:
parser.print_help()
# parser.print_usage() # for just the usage line
parser.exit()
args = parser.parse_args()
#
# Before parsing argument check if files mentioned in replot or update are valid
#
#
# Now parse the remaining arguments
#
mapbase = str(args.mapname[0])
#
# Open log file with mapname
#
outDir = '/home/mperry/Applications/SHARAD_Coverage/output/'
if not os.path.isdir(outDir):
os.mkdir(outDir)
log_fname = outDir + mapbase + ".log"
sys.stdout = open(log_fname, 'w')
sys.stderr = sys.stdout
#
# Get background data
#
bg_data = 'MOLA' if args.backgrounddata == 'MOLA' else args.backgrounddata[0]
#
# Check if update file exists
#
if args.update is not None:
update = True
f_update = args.update[0]
if not os.path.isfile(f_update):
print("ERROR: File with which to update does not exist in the current path.")
print("Please move to file to the local directory or specify the file's full path")
print("Exiting")
sys.stdout.flush()
parser.exit()
    orng = np.asarray(args.orbitrange, dtype=float)
#
# Only -r or -n can be specified. If both are specified print usage and exit
#
if args.rollbelow and args.rollabove:
print("ERROR: Only '-r' or '-n' can be specified. Exiting.")
sys.stdout.flush()
parser.print_help()
parser.exit()
elif args.rollbelow is not None:
roll = float(args.rollbelow[0])
rcri = str("<")
elif args.rollabove is not None:
roll = float(args.rollabove[0])
rcri = str(">=")
mres = shres if args.pixelsperdegree == shres else int(args.pixelsperdegree[0])
try:
wtrk = float(args.trackwidth[0])
except:
wtrk = float(args.trackwidth)
#############################
#
# Map bounds!
#
# Check if target box was selected
#
if args.targetbox != 0:
target = args.targetbox[0] #Retrieve target box
#
# Determine latitude and longitude bounds
#
mrng = targetboxbounds(target)
srng = targetboxbounds(target)
elif args.landingsite != '':
LS_Abbr = args.landingsite[0] # Get landing site abbreviation
#
# Determine latitude and longitude bounds
#
mrng = landingsitebounds(LS_Abbr)
else:
mrng = np.asarray(args.mapping, float)
srng = np.asarray(args.selection, float)
if srng[0] == 999:
srng[0] = mrng[0]
if srng[1] == 999:
srng[1] = mrng[1]
if srng[2] == 999:
srng[2] = mrng[2]
if srng[3] == 999:
srng[3] = mrng[3]
if args.centerlat is None:
#
# If the center latitude is not specified find the center latitude
#
avlat = float((mrng[1]+mrng[3])/2)
if avlat < -60:
clat = -90
elif avlat > 60:
clat = 90
else:
clat = 0
else:
clat = args.centerlat[0]
if args.missionphase is not None:
#
# Make sure mission phase is one of the actual mission phases
#
phase = str(args.missionphase[0]).upper()
if phase in missions:
ph_trig = True
else:
print("Error: Mission phase %s not in mission list. Please revise and restart" %
(phase))
sys.stdout.flush()
else:
phase = 'None'
cml = args.cumulative
if (args.elevation or args.foldmap) and args.savedataonly:
print("WARNING: Elevation or Fold maps selected along with no map option")
print("WARNING: Continuing with no map option")
sys.stdout.flush()
elevmap = False
foldmap = False
nomap = args.savedataonly
else:
elevmap = args.elevation
foldmap = args.foldmap
overlay = args.overlay
#
# If overlay is selected, turn off elevation map
#
if overlay and elevmap:
elevmap = False;
####
#
# Generate Product ID range from orbit range
#
try:
orbitfile = args.orbitfile[0]
except:
orbitfile = args.orbitfile
if orbitfile != '':
orbittrig = True
#
# Check if file exists
#
if os.path.isfile(orbitfile):
#
# Success! Now read in file
#
orbitids = np.loadtxt(orbitfile, int)
else:
print('Orbit file {} not found'.format(str(orbitfile)))
sys.exit()
if orng[0] < 829:
orng[0] = 829
pmin = int(orng[0] * 100000)
pmax = int((orng[1] + 1) *100000)
#
# MATT 28 SEPT 2017
# CHECK SHARAD INLINE RESOLUTION. IF THE CURRENT TRACK WIDTH IS LESS
# THAN 0.750 KM, ADJUST TO WTRK TO AVOID INTERPOLATION ISSUES AND CHECKERED
# TRACK APPEARANCES
if wtrk < 0.750:
shresi = wtrk
#
# Print out summary
#
print("----- Coverage Map Options Summary -----")
print("Map Base:\t{}".format(mapbase))
print("Background Data:\t{}".format(bg_data))
if orbittrig:
print("Orbit File:\t{}".format(str(orbitfile)))
else:
print("Orbit Range:\t{}".format(str(orng)))
print("Roll Angle:\t{}{}".format(str(rcri), str(roll)))
print("Track Width:\t{}".format(str(wtrk)))
print("Map Resolution:\t{}".format(str(mres)))
print("SHARAD Inline Resolution:\t{}".format(str(0.750)))
if args.targetbox != 0:
print("Target Box:\t{}".format(str(args.targetbox)))
elif args.landingsite != '':
print("Landing Site:\t{}".format(str(args.landingsite)))
print("Mapping Range:\t{}".format(str(mrng)))
print("Selection Range:\t{}".format(str(srng)))
print("Center Latitude:\t{}".format(str(clat)))
print("Mission Phase:\t{}".format(str(phase)))
print("Cumulative Mission Phase Map:\t{}".format(str(cml)))
print("Overlay Map:\t{}".format(str(overlay)))
print("Elevation Map:\t{}".format(str(elevmap)))
print("Fold Map:\t{}".format(str(foldmap)))
print("Save track data (no plot):\t{}".format(str(nomap)))
print("Restore a previous plot:\t{}".format(str(replot)))
print("Update an existing map:\t{}".format(str(update)))
print("----- End of Summary -----")
sys.stdout.flush()
return mapbase, orng, orbitids, pmin, pmax, roll, rcri, wtrk, mres, shresi, \
mrng, srng, clat, phase, cml, elevmap, foldmap, nomap, \
replot, update, overlay, bg_data
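#
# Illustrative invocation sketch (not part of the original script): parseargs()
# is driven by sys.argv, so a typical command line might look like the
# hypothetical example below; the script name, map name and option values are
# assumptions for demonstration only.
#
#   python SHARAD_coverage.py north_polar_map -o 829 60000 -n 2.0 -p 4 \
#       -m 0. 60. 360. 88. -e
#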
def targetboxbounds(targetbox_number):
prom = '[TARGET_BOX]: '
target_path = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/target_boxes/Targets'
#
# Given a target box number, return latitude and longitude bounds
#
sf = shapefile.Reader(target_path)
shapes = sf.shapes()
records = sf.records()
# Make array of IDS, longitudes and latitudes
if len(shapes) == len(records): #Idiot check
ids = np.zeros(len(records), int)
for ii in range(len(records)):
ids[ii] = records[ii][2]
w = np.where(ids == targetbox_number)
if len(w[0]) > 0:
mrng = np.asarray(shapes[w[0][0]].bbox)
else:
print(prom + 'Target box {} not found. Exiting.'.format(str(targetbox_number)))
sys.exit()
return mrng
def landingsitebounds(LS_Abbr):
prom = '[LANDING_SITE]: '
#
# LS_Abbr: Landing Site abbreviation
#
landing_sites = { # Mars2020 Landing Sites
'CLH': np.array([174.675, -15.386, 176.376, -13.685]),
'JEZ': np.array([76.669, 17.621, 78.370, 19.322]),
'NES': np.array([76.298, 17.0327, 77.998, 18.733]),
# ExoMars Landing Sites
'ARA': np.array([347.950, 7.132, 349.650, 8.833]),
'MAW': np.array([341.198, 21.398, 342.899, 23.098]),
'OXA': np.array([334.908, 17.288, 336.609, 18.989])
}
try:
mrng = landing_sites[LS_Abbr.upper()]
return mrng
except:
print(prom + 'Landing Site %s was not found. Please check your abbreviation and try again.' % (LS_Abbr.upper()))
sys.exit()
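#
# Illustrative usage sketch (not executed): both helpers return bounds as
# [lon_min, lat_min, lon_max, lat_max] in degrees East / North, e.g.
#
#   jez_bounds = landingsitebounds('JEZ')   # Jezero crater landing site
#   box_bounds = targetboxbounds(42)        # hypothetical target box ID
#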
#
# LOAD MOLA
#
def loadmola(mres, null=np.nan):
#
# This function loads and resamples per user input the MOLA data.
# The function mapextract is called in order to limit the extent of the MOLA
# data based on user input.
#
# Adapted for Python from SHARAD_coverage by Matthew Perry
# Last update to this function: 13 July 2017
#
# Input:
# mres - Resolution of map in pixels per degree
# null - null value; default is np.nan
#
# Output:
# elev = MOLA map
#
############################################################################
prom = "[LOAD_MOLA]: "
#
# Create map from binary INT array of surface elevation
# above areoid in meters and overplot ground tracks from
# selected observations
#
savemap = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/mola_data/mola_' + str(mres) + 'ppd.npy'
if not os.path.isfile(savemap):
#
# Rebin the elevation map
#
res = 128 #MOLA res in pix/deg
nlon = 360
nlat = 176
infile = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/mola_data/mola.128.dat'
#
# Read in and convert MOLA data
#
elev = np.fromfile(infile, dtype=np.int16, count=-1)
elev = np.reshape(elev, [nlat*res,nlon*res])
elev = np.asarray(elev)
elev = congrid(elev, [nlat*mres, nlon*mres])
elev = np.flipud(elev)
np.save(savemap,elev)
else:
print(prom + "Restoring existing resampled MOLA map {}".format(savemap))
sys.stdout.flush()
elev = np.load(savemap)
elev = elev/1000 #Place MOLA in KM
sys.stdout.flush()
return elev
def loadmdim(mres):
#
# This function loads and resamples per user input the MDIM data.
# The function mapextract is called in order to limit the extent of the MDIM
# data based on user input.
#
# Adapted for Python from SHARAD_coverage by Matthew Perry
# Last update to this function: 13 July 2017
#
# Input:
# mres - Resolution of map in pixels per degree
# null - null value; default is np.nan
#
# Output:
# elev = MDIM map
#
############################################################################
prom = "[LOAD_MDIM]: "
#
# Create map from binary INT array of surface elevation
# above areoid in meters and overplot ground tracks from
# selected observations
#
savemap = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/mdim_data/mdim_' + str(mres) + 'ppd.npy'
if not os.path.isfile(savemap):
#
# Rebin the elevation map
#
print(prom + "This is not available yet. Exiting")
sys.stdout.flush()
sys.exit()
'''
res = 128 #MOLA res in pix/deg
nlon = 360
nlat = 176
infile = '/data/mola_data/mola.128.dat'
#
# Read in and convert MOLA data
#
elev = np.fromfile(infile, dtype=np.int16, count=-1)
elev = np.reshape(elev, [nlat*res,nlon*res])
elev = np.asarray(elev)
elev = imresize(elev, [nlat*mres, nlon*mres])
elev = np.rot90(elev,3) #Reorient with N up and W left
np.save(savemap,elev)
'''
else:
print(prom + "Restoring existing resampled MDIM map {}".format(savemap))
sys.stdout.flush()
elev = np.load(savemap)
elev = elev #Place MOLA in KM
sys.stdout.flush()
return elev
def interpTracks(wo_data, shresi=0.75, limit=np.array([0,-88.,360, 88.])):
erad = 3396 # Mars equatorial radius in km
    d2r = np.pi / 180. # Degrees to radians
nx = np.shape(wo_data)[0]
npnt = 0
nob = 0
olat = []
olon = []
if nx > 1:
for jj in range(0, nx-1):
#
# Find great circle distance from spherical law of cosines
# #Errors occur if x[jj+1] - x[jj] == 0
d = erad * np.arccos(np.sin(wo_data[jj, 0]*d2r) * np.sin(wo_data[jj+1, 0]*d2r) +
np.cos(wo_data[jj,0]*d2r) * np.cos(wo_data[jj+1, 0]*d2r) *
np.cos((wo_data[jj+1,1] - wo_data[jj,1])*d2r))
if d > 200:
#
# These points occur where a ground track is interrupted
# due to a variable roll angle exceeding the selection limit
# or to the track coming in and out of the mapping limits.
#
print(warn3 + str(d) + ' km')
else:
try:
npt = int((d/shresi) + 1) # Set number of interp points by in-track res)
except:
print(warn4)
npt = 0 # Duplicate rows will cause d to be nana
if npt > 1:
#
# Adjust tracks that cross the meridian for interpolation
#
if wo_data[jj+1,1] - wo_data[jj,1] > 300:
wo_data[jj,1] = wo_data[jj,1] + 360
if wo_data[jj+1,1] - wo_data[jj,1] < -300:
wo_data[jj,1] = wo_data[jj,1] - 360
#
# MATT: This interpolation could be incorrect. MAKE SURE TO CHECK THIS
# IF THE TRACKS LOOK LIKE SILLY GOOSES
#
u = np.interp(np.arange(0, npt),[0, npt-1], [wo_data[jj,1], wo_data[jj+1,1]]) # Interpolation longitudes
v = np.interp(np.arange(0, npt),[0, npt-1], [wo_data[jj,0], wo_data[jj+1,0]]) # Interpolation latitudes
#
                    # Correct meridian-crossing longitudes
                    #
                    wneg = np.where(u < 0.)
                    if len(wneg[0]) > 0:
                        u[wneg] = u[wneg]+360.
                    wbig = np.where(u >= 360.)
                    if len(wbig[0]) > 0:
                        u[wbig] = u[wbig] - 360.
else:
u = wo_data[jj,1] if jj < nx - 2 else [wo_data[jj,1], wo_data[jj+1,1]] # Transfer longitudes
v = wo_data[jj,0] if jj < nx - 2 else [wo_data[jj,0], wo_data[jj+1,0]] # Transfer latitudes
print(warn2)
#
# Apply mapping limits
#
wlim = np.where((u >= limit[0]) & (u <= limit[2]) & (v >= limit[1]) & (v <= limit[3]))
if len(wlim[0]) > 0:
#
# Create or append to orbit lon, lat array
#
olon = np.append(olon, u)
olat = np.append(olat, v)
npnt += 1 # Increment interpolation counter
nob += 1
else:
print(warn1)
sys.stdout.flush()
return npnt, nob, olon, olat
def gettracks(odat, wtrk, troll='', shresi=0.75, limit=np.array([0, -88, 360, 88])):
st = dt.now()
global warn1, warn2, warn3, warn4
prom = "[GET_TRACK]: "
#
# Find number of unique orbit IDs
#
oval = np.array(odat[:,0], int)
ouni = oval[np.sort(np.unique(oval, return_index=True)[1])] # Orb_id (unique)
nuni = len(ouni) # Number of unique Orb_id values
nobs = str(nuni)
print(prom + "Database has {} SHARAD observations {}".format(nobs, troll))
sys.stdout.flush()
#
# Now cycle through ground-tracks, map orbit lon,lat (x,y) to map lon,lat (u,v)
#
nobs = 0 # Reset number of orbits to zero
    npnts = 0 # Set interpolation counter to zero
olons = []
olats = []
for ii in range(nuni):
wo = np.where(oval == ouni[ii]) # Find unique Orb_id
wo_data = odat[wo, 1:3][0]
warn1 = prom + "WARNING: less than two points, Orb_id {} not mapped".format(str(ouni[ii]))
warn2 = prom + "WARNING: Orb_id {} points closer than {} km".format(str(ouni[ii]), str(wtrk))
warn3 = prom + "WARNING: Omitted point from Orb_id {} spaced > 200 km -- ".format(str(ouni[ii]))
warn4 = prom + "WARNING: Invalid value encountered when calculating the distance for Orb_id {} ".format(str(ouni[ii]))
        npnt, nob, olon, olat = interpTracks(wo_data, shresi=shresi, limit=limit)
npnts += npnt # Update
nobs += nob # Update
olons.append(olon)
olats.append(olat)
olon = np.concatenate(olons).ravel()
olat = np.concatenate(olats).ravel()
print(prom + "Mapping {} points on {} SHARAD observations {}".format(str(npnts), str(nobs), troll))
print(prom + "Completed in {}".format(dt.now() - st))
sys.stdout.flush()
return nobs, npnts, olat, olon
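#
# Illustrative usage sketch (not executed): odat is an (N, 3) array of
# [Orb_id, Lat, Lon] rows, as returned by executequeries(); the two-point
# track below is hypothetical and only shows the expected layout.
#
#   demo_odat = np.array([[1234., 10.0, 45.0],
#                         [1234., 10.1, 45.0]])
#   nobs, npnts, olat, olon = gettracks(demo_odat, wtrk=3.0)
#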
def fixarrays(array1, array2):
if np.shape(array1)[0] != np.shape(array2)[0]:
#
# The rows are inconsistent
#
if np.shape(array1)[0] > np.shape(array2)[0]:
array1 = array1[:-1,:]
elif np.shape(array1)[0] < np.shape(array2)[0]:
            array2 = array2[:-1,:]
if np.shape(array1)[1] != np.shape(array2)[1]:
#
# The columns are inconsistent
#
if np.shape(array1)[1] > np.shape(array2)[1]:
array1 = array1[:,:-1]
elif np.shape(array1)[1] < np.shape(array2)[1]:
array2 = array2[:,:-1]
return array1, array2
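#
# Minimal demonstration of fixarrays (not called anywhere in the script): the
# array shapes below are hypothetical and chosen only to show the trailing
# row/column trimming behaviour.
#
def _fixarrays_example():
    a = np.zeros([101, 200])
    b = np.zeros([100, 201])
    a, b = fixarrays(a, b)
    return np.shape(a), np.shape(b)  # both trimmed to (100, 200)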
def setcolormap(table, overlay, limit):
latDiff = limit[3] - limit[1]
lonDiff = limit[2] - limit[0]
if lonDiff >= 50. and latDiff >= 45.:
cmap = custom_color_tables('MOLA')
elif lonDiff >= 50. and (limit[3] == 87.5 and limit[1] == 69):
cmap = custom_color_tables('MOLA')
elif lonDiff >= 50. and (limit[1] == -87.5 and limit[3] == -69):
cmap = custom_color_tables('MOLA')
else:
cmap = mpl.cm.jet
if overlay:
cmap.set_bad(alpha=0.0)
else:
cmap.set_bad('black', 1.)
return cmap
def coveragesupptxt(troll, nobs, omin, omax, rarea, limit, clat):
prom = '[PLOT LABELS]: '
print(prom + 'Setting up the plot labels')
sys.stdout.flush()
ttle = "{} SHARAD Observations between orbits {} and {}".format(nobs, omin, omax)
if troll:
ttle += '\n'+troll
sup_txt1 = "Created at: {}".format(dt.now())
sup_txt2 = "{:5.2f}% Coverage\nW, N Limits: [{}, {}, {}, {}]\ncenter lat (N): {} \ncenter lon (E): {}".format(rarea, str(limit[0]), str(limit[1]), str(limit[2]), str(limit[3]), str(clat), str(0))
return ttle, sup_txt1, sup_txt2
###########################################################################
#
# Database functions
#
def formquery(limit, rcri, roll, pmin, pmax, phase, cml):
if phase == 'None':
#
# Get observation data
#
query = "SELECT O.Orb_id, O.Lat, O.Lon FROM Orbit_data O WHERE ABS(O.Roll) {} {} ".format(rcri, str(roll))
query += "AND O.Lon BETWEEN {} and {} ".format(str(limit[0]), str(limit[2]))
query += "AND O.Lat BETWEEN {} AND {} ".format(str(limit[1]), str(limit[3]))
query += "AND O.Orb_id IN (SELECT X.Orb_id FROM Orbit X WHERE X.Orb_id = (SELECT MAX(Y.Orb_id) FROM Orbit Y "
query += "WHERE X.Prod_id = Y.Prod_id) AND X.Prod_id BETWEEN {} AND {}) ".format(str(pmin), str(pmax))
query += "ORDER BY O.Sample_Time"
#
# Get minimum and maximum orbits and dates
#
query2 = "SELECT DISTINCT Orbit.Prod_id DIV 100000, Orbit.Start_time "
query2 += "FROM Orbit "
query2 += "WHERE Orbit.Prod_id = ( "
query2 += "SELECT MIN(Orbit.Prod_id) "
query2 += "FROM Orbit JOIN Orbit_data ON Orbit.Orb_id = Orbit_data.Orb_id "
query2 += "WHERE ABS(Orbit_data.Roll) {} {} ".format(rcri, str(roll))
query2 += "AND Orbit_data.Lon BETWEEN {} AND {} ".format(limit[0], limit[2])
query2 += "AND Orbit_data.Lat BETWEEN {} AND {} ".format(limit[1], limit[3])
query2 += "AND Orbit.Prod_id BETWEEN {} and {}".format(str(pmin), str(pmax))
query2 += ") "
query2 += "OR Orbit.Prod_id = ( "
query2 += "SELECT MAX(Orbit.Prod_id) "
query2 += "FROM Orbit JOIN Orbit_data ON Orbit.Orb_id = Orbit_data.Orb_id "
query2 += "WHERE ABS(Orbit_data.Roll) {} {} ".format(rcri, str(roll))
query2 += "AND Orbit_data.Lon BETWEEN {} AND {} ".format(limit[0], limit[2])
query2 += "AND Orbit_data.Lat BETWEEN {} AND {} ".format(limit[1], limit[3])
query2 += "AND Orbit.Prod_id BETWEEN {} and {}".format(str(pmin), str(pmax))
query2 += ")"
else:
if phase not in ['PSP', 'ESP', 'EM1',
'EM2', 'EM3', 'EM4',
'EM5', 'EM6']:
print('Select phase {} is not understood. Please try again'.format(phase))
sys.stdout.flush()
else:
if phase == "PSP":
st = '2006-10-01 00:00:00.000'
et = '2008-10-31 23:59:59.999'
elif phase == "ESP":
st = '2008-11-01 00:00:00.000'
et = '2010-11-30 23:59:59.999'
elif phase == "EM1":
st = '2010-12-01 00:00:00.000'
et = '2012-09-30 23:59:59.999'
elif phase == "EM2":
st = '2012-10-01 00:00:00.000'
et = '2014-09-30 23:59:59.999'
elif phase == "EM3":
st = '2014-10-01 00:00:00.000'
et = '2016-09-30 23:59:59.999'
elif phase == "EM4":
st = '2016-10-01 00:00:00.000'
et = '2019-09-31 23:59:59.999'
elif phase == "EM5":
st = '2019-10-01 00:00:00.000'
et = '2022-09-30 23:59:59.999'
elif phase == "EM6":
st = '2022-10-01 00:00:00.000'
et = '2024-09-30 23:59:59.999'
if cml:
st = '2006-10-01 00:00:00.000'
query = "SELECT OD.Orb_id, OD.Lat, OD.Lon FROM Orbit_data as OD INNER JOIN Orbit as O ON O.Orb_id = OD.Orb_id "
query += "INNER JOIN DecData as D ON D.Prod_id = O.Prod_id WHERE D.OST_Start_Date BETWEEN '{}' AND '{}' ".format(st, et)
query += "AND ABS(OD.Roll) {} {} ".format(rcri, str(roll))
query += "AND OD.Lon BETWEEN {} AND {} AND OD.Lat BETWEEN {} AND {} ".format(str(limit[0]), str(limit[2]), str(limit[1]), str(limit[3]))
query += "AND OD.Orb_id IN (SELECT X.Orb_id FROM Orbit X WHERE X.Orb_id = (SELECT MAX(Y.Orb_id) FROM Orbit Y "
query += "WHERE X.Prod_id = Y.Prod_id)) "
query += "ORDER BY OD.Orb_id"
#
# Get minimum and maximum orbits and dates
#
query2 = "SELECT DISTINCT Orbit.Prod_id DIV 100000, Orbit.Start_time "
query2 += "FROM Orbit "
query2 += "WHERE Orbit.Prod_id = ( "
query2 += "SELECT MIN(Orbit.Prod_id) "
query2 += "FROM Orbit JOIN Orbit_data ON Orbit.Orb_id = Orbit_data.Orb_id "
query2 += "WHERE ABS(Orbit_data.Roll) {} {} ".format(rcri, str(roll))
query2 += "AND Orbit_data.Lon BETWEEN {} AND {} ".format(limit[0], limit[2])
query2 += "AND Orbit_data.Lat BETWEEN {} AND {} ".format(limit[1], limit[3])
query2 += "AND Orbit.Start_time BETWEEN '{}' and '{}'".format(str(st), str(et))
query2 += ") "
query2 += "OR Orbit.Prod_id = ( "
query2 += "SELECT MAX(Orbit.Prod_id) "
query2 += "FROM Orbit JOIN Orbit_data ON Orbit.Orb_id = Orbit_data.Orb_id "
query2 += "WHERE ABS(Orbit_data.Roll) {} {} ".format(rcri, str(roll))
query2 += "AND Orbit_data.Lon BETWEEN {} AND {} ".format(limit[0], limit[2])
query2 += "AND Orbit_data.Lat BETWEEN {} AND {} ".format(limit[1], limit[3])
query2 += "AND Orbit.Start_time BETWEEN '{}' and '{}'".format(str(st), str(et))
query2 += ")"
return query, query2
def formorbitquery(limit, orbitids):
query = "SELECT O.Orb_id, O.Lat, O.Lon FROM Orbit_data O WHERE "
query += "O.Lon BETWEEN {} and {} ".format(str(limit[0]), str(limit[2]))
query += "AND O.Lat BETWEEN {} AND {} ".format(str(limit[1]), str(limit[3]))
query += "AND O.Orb_id IN (SELECT X.Orb_id FROM Orbit X WHERE X.Orb_id = (SELECT MAX(Y.Orb_id) FROM Orbit Y "
query += "WHERE X.Prod_id = Y.Prod_id) AND X.Prod_id IN ( "
for ii in range(len(orbitids)):
if ii != len(orbitids) - 1:
query += '{}, '.format(orbitids[ii])
else:
query += '{})) '.format(orbitids[ii])
query += 'ORDER BY O.Sample_Time'
query2 = "SELECT Orbit.Prod_id DIV 100000, Orbit.Start_time "
query2 += "FROM Orbit "
query2 += "WHERE Orbit.Prod_id = {} ".format(str(orbitids[0]))
query2 += "OR Orbit.Prod_id = {}" .format(str(orbitids[-1]))
return query, query2
def executequeries(limit,srng, rcri, roll, pmin, pmax, phase, cml, config_file, orbitids, latbuf=50.):
global_lim = np.array([0,-88,360,88])
erad = 3396 # Mars equatorial radius in km
d2r = np.pi / 180. # Degress to radians
escl = erad * d2r #Equatorial scale in km/deg
prom = '[EXECUTE_QUERIES]: '
#
# Determine selection limits
#
# THE FOLLOWING COMES FROM A CONVERSATION WITH THAN ON HOW TO AUTOMATE THE SEARCH
# TO INCLUDE ALL TRACKS THAT GO THROUGH A GIVEN WINDOW
#
if orbitids == '':
if np.all(limit == srng):
if limit[0] > global_lim[0]:
sllon = limit[0] - np.abs(latbuf*np.tan((1.6+87.4*np.cos((90-limit[1])*d2r))*d2r))/escl
else:
sllon = limit[0]
if limit[2] < global_lim[2]:
sulon = limit[2] + np.abs(latbuf*np.tan( (1.6+87.4*np.cos((90-limit[3])*d2r))*d2r))/escl
else:
sulon = limit[2]
if limit[1] > global_lim[1]:
sllat = limit[1]-np.abs(latbuf/escl)
else:
sllat = limit[1]
if limit[3] < global_lim[3]:
sulat = limit[3]+np.abs(latbuf/escl)
else:
sulat = limit[3]
else:
sllon = srng[0]
sllat = srng[1]
sulon = srng[2]
sulat = srng[3]
slimit = np.array([sllon, sllat, sulon, sulat])
print('Querying the database with selection limits: {}'.format(slimit))
sys.stdout.flush()
#
query, query2 = formquery(slimit, rcri, roll, pmin, pmax, phase, cml)
#
# Connect to DB and run query
#
else: #Orbit file loaded
print('Querying the database with selected orbits.')
query, query2 = formorbitquery(limit, orbitids)
[u, p, d] = dbreadconf(config_file)
db = dbconnect(u, p, d)
c = db.cursor()
numrows = c.execute(query)
if numrows == 0:
print(prom + 'The query returned no results. Exiting')
        sys.stdout.flush()
        sys.exit()
#
# Infill results into array
#
orbit_data = np.zeros([numrows,3], float)
for ii in range(numrows):
row = c.fetchone()
orbit_data[ii,0] = row[0]
orbit_data[ii,1] = row[1]
orbit_data[ii,2] = row[2]
#
# Get min and max values from SQL
#
#if orbitids == '':
c = db.cursor()
c.execute(query2)
temp = c.fetchall()
orbit_min, min_date = temp[0][0], temp[0][1]
orbit_max, max_date = temp[-1][0], temp[-1][1]
#else:
# orbit_min = min(orbitids)
# orbit_max = max(orbitids)
#
# close DB connection
#
c.close()
db.close()
return orbit_data, orbit_min, orbit_max, min_date, max_date
def savedata(mapbase, coverage, fold, limit, rarea, nobs, troll, clat, omin, omax):
fname = mapbase
    data = {'coverage': coverage, 'fold': fold, 'limit': limit, 'rarea': rarea, 'nobs': nobs, 'troll': troll, 'clat': clat, 'omin': omin, 'omax': omax}
np.save(fname, data)
return
def loaddata(filename):
#
# First check to ensure data exists in path given
#
if os.path.isfile(filename):
#
# File exists
#
        data = np.load(filename, allow_pickle=True)
        data = data[()] # Data saved through np.save is a 0-d array containing the dictionary
coverage = data['coverage']
fold = data['fold']
limit = data['limit']
rarea = data['rarea']
nobs = data['nobs']
troll = data['troll']
clat = data['clat']
omin = data['omin']
omax = data['omax']
return coverage, fold, limit, rarea, nobs, troll, clat, omin, omax
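#
# Illustrative round trip (not executed): savedata() stores a dictionary via
# np.save and loaddata() restores it; the filename and inputs below are
# hypothetical.
#
#   savedata('demo_map', coverage, fold, limit, rarea, nobs, troll, clat,
#            omin, omax)
#   coverage, fold, limit, rarea, nobs, troll, clat, omin, omax = \
#       loaddata('demo_map.npy')
#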
def congrid(a, newdims, method='linear', centre=False, minusone=False):
if not a.dtype in [np.float64, np.float32]:
a = np.cast[float](a)
m1 = np.cast[int](minusone)
ofs = np.cast[int](centre) * 0.5
old = np.array( a.shape )
ndims = len( a.shape )
if len( newdims ) != ndims:
print("[congrid] dimensions error. " \
"This routine currently only support " \
"rebinning to the same number of dimensions.")
return None
newdims = np.asarray( newdims, dtype=float )
dimlist = []
if method == 'neighbour':
for i in range( ndims ):
base = np.indices(newdims)[i]
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
cd = np.array( dimlist ).round().astype(int)
newa = a[list( cd )]
return newa
elif method in ['nearest','linear']:
# calculate new dims
for i in range( ndims ):
base = np.arange( newdims[i] )
dimlist.append( (old[i] - m1) / (newdims[i] - m1) \
* (base + ofs) - ofs )
# specify old dims
        olddims = [np.arange(i, dtype=float) for i in list( a.shape )]
# first interpolation - for ndims = any
mint = scipy.interpolate.interp1d( olddims[-1], a, kind=method )
newa = mint( dimlist[-1] )
#
# This is an ad-hoc fix to the cookbook being in python 3,
# I can't figure out how to reproduce the results from
# [ndims - 1] + range(ndims -1 )
trorder = [1, 0]
for i in range( ndims - 2, -1, -1 ):
newa = newa.transpose( trorder )
mint = scipy.interpolate.interp1d( olddims[i], newa, kind=method )
newa = mint( dimlist[i] )
if ndims > 1:
# need one more transpose to return to original dimensions
newa = newa.transpose( trorder )
return newa
elif method in ['spline']:
oslices = [ slice(0,j) for j in old ]
        oldcoords = np.ogrid[oslices]
nslices = [ slice(0,j) for j in list(newdims) ]
newcoords = np.mgrid[nslices]
        newcoords_dims = list(range(newcoords.ndim))
#make first index last
newcoords_dims.append(newcoords_dims.pop(0))
newcoords_tr = newcoords.transpose(newcoords_dims)
# makes a view that affects newcoords
newcoords_tr += ofs
deltas = (np.asarray(old) - m1) / (newdims - m1)
newcoords_tr *= deltas
newcoords_tr -= ofs
newa = scipy.ndimage.map_coordinates(a, newcoords)
return newa
else:
print("Congrid error: Unrecognized interpolation type.\n", \
"Currently only \'neighbour\', \'nearest\',\'linear\',", \
"and \'spline\' are supported.")
return None
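#
# Illustrative usage sketch (not executed): rebin a small array with the
# default linear interpolation; the values are hypothetical.
#
#   demo = np.arange(16, dtype=float).reshape(4, 4)
#   demo_up = congrid(demo, [8, 8])      # upsample to 8x8
#   demo_down = congrid(demo, [2, 2])    # downsample to 2x2
#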
##################################################################################
#
# Plotting Functions
#
##################################################################################
def getmapparams(clat, limit):
if limit[0] == 0 and limit[2] == 360:
#
# Global maps
#
if clat == 90:
table = 'MOLA'
vmin = -6.0
vmax = -3.0
proj = 'north'
meridians = np.arange(0, 360, 60)
parallels = np.arange(70,90+1, 10)
elif clat == -90:
table = 'MOLA'
vmin = -0.5
vmax = 6.0
proj = 'south'
meridians = np.arange(0, 360, 60)
parallels = np.arange(-90,-70-1, 10)
else:
table = 'MOLA'
vmin = -9
vmax = 21
proj = 'glob'
meridians = np.arange(0, 360, 60)
parallels = np.arange(-70,70+1, 20)
else:
table = 'MOLA'
meridians = np.arange(limit[0], limit[2], (limit[2]-limit[0])/3)
parallels = np.arange(limit[1], limit[3], (limit[3]-limit[1])/3)
if clat == 90:
vmin = -6.5
vmax = -1.5
proj = 'north_custom'
elif clat == -90:
vmin = -0.5
vmax = 6.0
proj = 'south_custom'
else:
vmin = -9
vmax = 21
proj = 'custom'
return table, vmin, vmax, proj, meridians, parallels
def plotmap(coverage, nelev, foldmap, elevmap, limit, blon, blat, troll, nobs, omin, omax, rarea, clat, mres, overlay=False, mapbase="SHARAD_Coverage", null=np.nan):
#
# Wrapper for map plotting
#
#
# Get plot labels and supplementary text
#
ttle, sup_txt1, sup_txt2 = coveragesupptxt(troll, nobs, omin, omax, rarea, limit, clat)
tclat = "" if clat == 0 else str("clat = " + str(clat))
#
# Set the plot size to standard paper size
# This will help create maps with uniform font sizes
#
xsize, ysize = 11, 8.5
if foldmap:
print('This function is not available at this time')
sys.stdout.flush()
return
else:
if overlay:
bg_map = nelev.copy()
bg_map[~np.isnan(coverage)] = null
nelev[np.isnan(coverage)] = null
else:
if elevmap: #Plot coverage paths in elevation rather than black
nelev[np.isnan(coverage)] = null
bg_map = False
else:
nelev[~np.isnan(coverage)] = null
bg_map = False
[table, vmin, vmax, proj, mer, par] = getmapparams(clat, limit)
#
# Get colormap and set nulls to black
#
cmap = setcolormap(table, overlay, limit)
#
# Let the plotting begin
#
drawmap(nelev, bg_map, blon, blat, xsize, ysize, limit, vmin, vmax, proj, mres, overlay=overlay, mer=mer, par=par, cmap=cmap, ttle=ttle, sup_txt1=sup_txt1, sup_txt2=sup_txt2, mapbase=mapbase)
return
def drawmap(nelev, bg_map, blon, blat, xsize, ysize, limit, vmin, vmax, proj, mres, overlay=False, mer=None, par=None, cmap=mpl.cm.jet, ttle=None, sup_txt1=None, sup_txt2=None, mapbase='SHARAD_Coverage', outDir='../output/'):
prom = '[DRAW MAP]: '
bannername = outDir + mapbase + '_banner.png'
jpgname = outDir + mapbase + '.jpg'
pngname = outDir + mapbase + '.png'
# Create meshgrid for plotting
#
if proj == 'glob' or proj == 'north' or proj == 'south':
#
# Roll nelev and bg_map to get into proper coordinates
#
sft = int(180*mres)
nelev = np.roll(nelev, sft, axis=1)
if overlay:
bg_map = np.roll(bg_map, sft, axis=1)
blon = blon - 180.;
limit[0] = limit[0] - 180.
limit[2] = limit[2] - 180.
if proj == 'glob':
mer = mer - 180.
#
# Now create a masked array for plotting
#
if overlay:
bg_map[np.where(~np.isnan(nelev))] = np.nan
map_bg = np.ma.masked_where(np.isnan(bg_map), bg_map)
plot_data = np.ma.masked_where(np.isnan(nelev), nelev)
[x, y] = np.meshgrid(blon, blat)
#
# Let the plotting begin
#
print(prom + 'Initializing Map')
sys.stdout.flush()
# fig = plt.figure(figsize=(xsize, ysize), dpi=500, facecolor='white')
fig = plt.figure(figsize=(11, 8.5), dpi=500, facecolor='white')
ax1 = fig.add_subplot(111)
#
# Set the basemap projection and limits
#
if proj == 'glob':
bmap = Basemap(projection='cyl', llcrnrlat=limit[1], urcrnrlat=limit[3], llcrnrlon=limit[0],
urcrnrlon=limit[2], resolution=None, lon_0=0)
elif proj == 'custom':
bmap = Basemap(projection='cyl', llcrnrlat=limit[1], urcrnrlat=limit[3], llcrnrlon=limit[0],
urcrnrlon=limit[2], resolution=None)
elif proj == 'north_custom':
bmap = polar_stere(limit[0], limit[2], limit[1], limit[3], resolution=None)
#bmap = Basemap(projection='npaeqd', boundinglat=limit[1], lon_0=0, resolution=None, round=False)
elif proj == 'south_custom':
bmap = Basemap(projection='spstere', boundinglat=limit[3], lon_0=180, resolution=None, round=False)
elif proj == 'north':
bmap = Basemap(projection='npstere', boundinglat=limit[1], lon_0=0, resolution=None, round=True)
elif proj == 'south':
bmap = Basemap(projection='spstere', boundinglat=limit[3], lon_0=180, resolution=None, round=True)
# Plot the data
#
if proj == "custom":
if overlay:
bmap.pcolormesh(x, y, map_bg, cmap=plt.cm.gray, latlon=True)
cs = bmap.pcolormesh(x, y, plot_data, cmap=cmap, latlon=True, alpha=1.0)
fig.savefig(bannername, dpi=100, bbox_inches='tight', transparent=True)
else:
if overlay:
bmap.pcolormesh(x, y, map_bg, vmin=vmin, vmax=vmax, cmap='gray', latlon=True)
cs = bmap.pcolormesh(x, y, plot_data, vmin=vmin, vmax=vmax, cmap=cmap, latlon=True, alpha=1.0)
fig.savefig(bannername, dpi=200, bbox_inches='tight', transparent=True)
print(prom + 'Banner image saved as {}'.format(bannername))
#
# If meridians and parallels, plot
#
if mer is not None:
bmap.drawmeridians(mer, labels=[False, False, True, True], fontsize=8)
if par is not None:
bmap.drawparallels(par, labels=[True, True, False, False], fontsize=8)
#
# Supplemental Text
#
if not sup_txt2 is None:
plt.gcf().text(0.00, 0.04, sup_txt2, fontsize=10, horizontalalignment='left', verticalalignment='center')
if not sup_txt1 is None:
plt.gcf().text(1.00, 0.01, sup_txt1, fontsize=10, horizontalalignment='right', verticalalignment='center')
#
# Colorbar
#
if proj == "north" or proj == "south":
cbar = bmap.colorbar(cs, location='bottom', pad="5%")
elif proj == "custom":
cbar = bmap.colorbar(cs, location='bottom', pad="10%")
else:
cbar = bmap.colorbar(cs, location='bottom', pad="20%")
#
# Plot title
#
plt.title(ttle, y=1.08, fontsize=12)
fig.savefig(jpgname, dpi=250)
fig.savefig(pngname, dpi=1000, transparent=True)
return
##################################################################################
#
# Log functions
#
##################################################################################
def writeLog(logfile, entry, verbose=False):
    prom = "[WRITE LOG]: "
    if verbose:
        print(entry)
    if not logfile.closed:
        logfile.write(entry+"\n")
    else:
        print(prom + "Log file is closed or has not been opened for writing.")
        print(prom + "Exiting...")
    return
import sys, os
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import ListedColormap
from datetime import datetime as dt
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
def mapextract(inmap, limit, lhead=1024, nlon=None, nlat=None, fmt=False, null=np.nan,
diag=False, extent=np.array([0., -90., 360., 90.]), swap_endian=False):
#
# Created: 2003 Sep 01, Than Putzig - converted from mappoint.pro
# mod: 2004 Sep 08, Than Putzig - switch to LIMIT spec, added assoc params
# mod: 2006 Feb 24, Than Putzig - no longer loading INMAP to array,
# added min, max, mean, and median diagnostics
# mod: 2012 Mar 09, Than Putzig - added SWAP_ENDIAN keyword
# mod: 2012 Mar 12, Than Putzig - fixed latitude range selection
# mod: 2017 Sep 29, Matt Perry - ported to Python 3+(missing file option at this point)
#
# For extracting a subset of an IDL or ASSOC map.
# Longitudes should be 0 to <360 (West or East)
# Latitudes should be -90 to 90 North.
# Input map is presumed to be of bin-centered data (e.g., first longitudes
# are 0+BLON/2 where BLON is longitudinal bin size).
#
# Procedure:
############################################################################
prom = '[MAP_EXTRACT]: '
##########################################################################
#
# Validity check -- test range of LLAT, HLAT, LLON and HLON, whether NLON
# and NLAT or neither is specified. Print error message
# and exit with zero for output if parameters incorrect.
llon=limit[0]
llat=limit[1]
hlon=limit[2]
hlat=limit[3]
if llon > 360. or llon < 0. or hlon > 360. or hlon < 0. or llon > hlon:
print(prom + "longitude value out of range (0-360, llon < hlon)")
outmap = 0.0
print(' ')
sys.stdout.flush()
return outmap
if llat > 90. or llat < -90. or hlat > 90. or hlat < -90 or llat > hlat:
print(prom + "latitude value out of range (-90-90, llat < hlat)")
outmap = 0.0
print(' ')
sys.stdout.flush()
return outmap
if not nlon is None or not nlat is None:
print(prom + "You must specify either both NLON and NLAT or neither.")
outmap = 0.0
print("")
sys.stdout.flush()
return outmap
#######################################################################
#
# determine whether INMAP is array or file and extract dimensions
#
# If INMAP is a file, it is assumed to be an assoc file with a 1024 byte
# header, where the first two words are the number of longitudes and number
# of latitudes, respectively (e.g., output from mapbin.pro), and the data
# is an array of type INT (16 bit signed integers).
#
atype = type(inmap)
if atype is str:
#Here I am assuming any string entry is a file name
print(prom + "This functionality is not yet available. Please use array until further notice.")
outmap = 0.0
print("")
sys.stdout.flush()
return outmap
elif atype is np.ndarray:
mp = inmap
msize = np.shape(mp)
nlon = msize[1]
nlat = msize[0]
else:
print(prom + "Variable type not recognized. Please use either an array or file name.")
outmap = 0.0
print("")
sys.stdout.flush()
return outmap
#######################################################################
#
# precompute a few things
#
if np.shape(extent)[0] != 4:
extent = np.array([0., -90., 360., 90.])
polar = '' # preset
lat = 0 # preset
blat = (extent[3] - extent[1]) / nlat # latitude bin size
blon = (extent[2] - extent[0]) / nlon # longitude bin size
maxlon = nlon*blon - blon/2 #Largest longitude of bin center
#
# Find the high and low latitude and longitude indices
#
lat_lo = 0
lat_hi = nlat - 1
for ii in range(0, nlat): #ii: loop latitudes ascending
jj = nlat - 1 - ii #jj: reverse loop
latii = ii*blat + extent[1] + blat/2 # find map bin center for ii
latjj = jj*blat + extent[1] + blat/2 # find map bin center for jj
if latii < llat:
lat_lo = ii + 1 #Save lower limit for ii
if latjj > hlat:
lat_hi = jj - 1 #Save upper limit for jj
lon_lo = 0
lon_hi = nlon - 1
for ii in range(0, nlon): #ii: loop longitudes ascending
jj = nlon - 1 - ii #jj: reverse loop
lonii = ii*blon + extent[0] + blon/2 # find map bin centers for ii
lonjj = jj*blon + extent[0] + blon/2 # find map bin centers for jj
if lonii < llon:
lon_lo = ii + 1 # save lower limit for ii
if lonjj > hlon:
lon_hi = jj - 1 # save upper limit for jj
#
# Python's slicing goes from n:m where the mth element IS NOT included
# Add one to each hi index.
#
lon_hi = lon_hi + 1
lat_hi = lat_hi + 1
# Get OUTMAP
#
outmap = mp[lat_lo:lat_hi, lon_lo:lon_hi]
if diag:
#
# Find min, max, mean, median
#
wdnn = np.where(outmap != null)
if len(wdnn[0]) == 0:
print(prom + 'WARNING: all data in null = {}'.format(null))
print(prom + 'Exiting...')
sys.stdout.flush()
exit()
dmin = np.amin(outmap[wdnn])
dmax = np.amax(outmap[wdnn])
dmea = np.mean(outmap[wdnn])
dmed = np.median(outmap[wdnn])
#
# Get output lat and lon arrays
#
# Determine minimum and maximum lats and lons from indicies
#
minlon = lon_lo * blon + blon / 2
maxlon = lon_hi * blon + blon / 2
minlat = lat_lo * blat - 88. + blat / 2.
maxlat = lat_hi * blat - 88. + blat / 2.
#
# How many elements will be in the arrays
#
    nlons = int((maxlon - minlon)/blon)
    nlats = int((maxlat - minlat)/blat)
#
# Form arrays
#
lon_array = np.arange(nlons) * blon + minlon + blon/2
lat_array = np.arange(nlats) * blat + minlat + blat/2
#limit = [lon_lo*blon+blon/2,
# lat_lo*blat-88.+blat/2,
# lon_hi * blon + blon / 2,
# lat_hi * blat - 88. + blat / 2]
#
# Diagnostic output
#
print('------------------------')
print('Map information:')
if atype is str:
print('Map file: {}'.format(inmap))
print('NLAT: {}'.format(str(nlat)))
print('NLON: {}'.format(str(nlon)))
if atype is str:
print('LHEAD: {}'.format(str(lhead)))
print('Values at first 5 longitudes starting from 0:')
print('Northmost:')
print(str(mp[nlat-1, 0:4]))
print("Southern most:")
print(str(mp[0, 0:4]))
print("------------------------")
print("Desired subsection:")
print("LLAT:\t{}".format(str(llat)))
print("HLAT:\t{}".format(str(hlat)))
print("LLON:\t{}".format(str(llon)))
print("LLON:\t{}".format(str(hlon)))
print("------------------------")
print("Extracted data:")
print("\tMap Pixel:\tLat/Lon:")
print("lat_lo:\t{},{}".format(str(lat_lo), str(minlat)))
print("lat_hi:\t{},{}".format(str(lat_hi), str(maxlat)))
print("lon_lo:\t{},{}".format(str(lon_lo), str(minlon)))
print("lon_hi:\t{},{}".format(str(lon_hi), str(maxlon)))
print("minimum:\t{}".format(dmin))
print("maximum:\t{}".format(dmax))
print("mean:\t{}".format(dmea))
print("median:\t{}".format(dmed))
print("------------------------")
print("Output map data array:")
print('{}:\t{}'.format(type(outmap), np.shape(outmap)))
print("------------------------")
sys.stdout.flush()
return outmap, blat, blon, lat_array, lon_array
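#
# Illustrative usage sketch (not executed): extract a 0-30E, 0-30N window
# from a resampled global MOLA array; the resolution value is hypothetical.
#
#   elev = loadmola(4)
#   submap, blat, blon, lats, lons = mapextract(elev, [0., 0., 30., 30.],
#                                               diag=True)
#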
def getmap(dim, data, lat, lon, blat, blon, null=np.nan, limit=np.array([0., -90., 360., 90.]), fp=0.0):
st = dt.now()
# #############################################################################
# mapp.pro
#
# Written 1998 Mar 05, Mike Mellon
# Updated 1998 Apr 20, Mike Mellon, to improve speed
# Updated 2011 Dec 05, Than Putzig, to allow non-global mapping
# Updated 2014 Feb 24, Than Putzig, to allow rectangular bins
# Updated 2014 Feb 26, IDL Version 1: Than Putzig, add use of footprints
# Updated 2015 Sep 23, IDL Version 1: Than Putzig, change fix to long for longitude
# Updated 2017 Sep 01, IDL Version 2: Than Putzig, rename to mapp from map (IDL8.x)
# Updated 2017 Sep 29, Python Version 1: Matt Perry, ported to Python 3+ from mapp.pro
#
# This program is intended to be used to bin data into a map. It accepts
# an array of data points and co-registered latitudes and longitudes. From
# this is creates a map of the desired bin size and puts the data into those
# bins averaging as needed. If there is no data in a bin, value is set to NULL.
# As of version 1, a footprint for the data may be specified that will cause
# data points to be mapped to more than one bin where the footprint exceeds
# the bin size (typically more often at high latitudes). For compatibility
# with prior versions, the default footprint size is zero.
# #############################################################################
vers = '1'
prom = '[GET_MAP]: ' # Errors prompt
d2r = np.pi / 180. # degree to radians
eflag = 0 # Preset error flag
ndat = len(data) # Get size of data array
if ndat != len(lat):
        eflag = 1
print(prom + "DATA and LATITUDE dimensions are not the same")
print(prom + "DATA AND LATITUDE must be co-registered")
sys.stdout.flush()
if ndat != len(lon):
        eflag = 1
print(prom + "DATA and LONGITUDE dimensions are not the same")
print(prom + "DATA AND LONGITUDE must be co-registered")
sys.stdout.flush()
if blat <= 0 or blon <=0:
eflag = 1
print(prom + "bin size(s) must be greater than zero")
sys.stdout.flush()
#
# Check LIMIT ranges (default to global)
#
if type(limit) is not np.ndarray:
limit = np.array(limit, float)
if len(limit) != 4:
limit = np.array([0, -90, 360, 90])
minlon = limit[0]
minlat = limit[1]
maxlon = limit[2]
maxlat = limit[3]
longlob = (limit[0] == 0. and limit[2] == 360.)
if minlon > 360. or minlon < 0. or maxlon > 360. \
or maxlon < 0. or minlon > maxlon:
print(prom + "LIMIT longitude value out of range (0-360, minlon < maxlon")
sys.stdout.flush()
eflag = 1
if minlat > 90. or minlat < -90. or maxlat > 90. \
or maxlat < -90. or minlat > maxlat:
print(prom + "LIMIT latitude value out of range (-90-90, minlat < maxlat")
sys.stdout.flush()
eflag = 1
if eflag == 1:
mout = -1
###############################################################################
# Define needed variables: I need to create a map array with defined
# longitude and latitudes in order to place co-registered data-lat-lon
# into a mapped format.
###############################################################################
req = 3396. # Radius of Mars at equator
bn = np.array([blon, blat])
    if type(fp) is not np.ndarray and type(fp) is not list:
fp = float(fp)
fp = np.array([fp,fp])
elif type(fp) is list:
fp = np.array(fp)
dhfe = bn/2 if np.amin(fp) == 0.0 else fp*180/(np.pi*req)/2 # deg/0.5 footprint @ equat
nlon = int((maxlon - minlon)/bn[0]) # Number of map longitudes
nlat = int((maxlat - minlat)/bn[1]) # Number of map latitudes
mout = np.zeros([int(nlat), int(nlon)], float) # Initialize output map array
nmap = np.zeros([int(nlat), int(nlon)],int) # Initialize data density map array
mout_new = np.zeros([int(nlat), int(nlon)], float) # Initialize output map array
nmap_new = np.zeros([int(nlat), int(nlon)],int) # Initialize data density map array
mlon = np.arange(int(nlon)) * bn[0] + minlon + bn[0]/2 # lon array centered on map bins
mlat = np.arange(int(nlat)) * bn[1] + minlat + bn[1]/2 # Lat array centered on map bins
###############################################################################
# Next, scan through all the longitudes and latitudes in MOUT, locate all
# the data points falling within a bin and sum them, averaging at the end.
# The streamlined way is to find all the points that fall in a zone of
# bins (all the same latitude), save that subset of longitudes and data,
# then search only that subset for appropriate longitude bins. It turns out to
    # be more than 10 times faster that way (at least when Mellon developed this
# method with longitudes in the outer loop and latitudes in the inner loop).
###############################################################################
slat = np.amax(np.append(dhfe[1], bn/2)) #Lat search distance in 1/2 bin or 1/2 footprint
err = 0
err_new = 0
dhfl_new = dhfe[0]/(np.cos(mlat*d2r)) #Degs/half footprint at this lat
n_new = np.array(dhfl_new*2/bn[0], int) # no. lon bins to fill
n_new[n_new < 1] = 1
n_new[n_new > int(nlon)] = int(nlon)
for ilat in range(0, int(nlat)): # Loop over latitute bins
#
# Test for largest latitude. Elsewhere, include bin lower boundary and stay
# less than upper boundary
#
if ilat != nlat-1: # Test for largest latitude
w1 = np.where((lat >= mlat[ilat] - slat) & (lat < mlat[ilat] + slat))
else:
#
# At largest latitude, include upper boundary (e.g. 90 N if global)
#
w1 = np.where((lat >= mlat[ilat] - slat) & (lat <= mlat[ilat] + slat))
if len(w1[0]) != 0: # Trap no points here
lon2 = lon[w1] # Save longitude subset
data2 = data[w1] #Save data subset
dhfl = dhfe[0]/(np.cos(mlat[ilat]*d2r)) #Degs/half footprint at this lat
n = int(dhfl*2/bn[0]) # no. lon bins to fill
if n < 1:
n = 1
elif n > int(nlon):
n = int(nlon)
lons_new = np.arange(n_new[ilat]) - float(n_new[ilat]-1.)/2.
#
            # Confirmed that lons_new and lons are the same for all iterations
#
lonz_new = lons_new*bn[0] + lon2.reshape([len(lon2),1])
ilon_new = np.array((lonz_new - limit[0])/bn[0], int)
#
# All this line does is make sure that the indexes do not exceed nlon
# and are greated than zero
#
if longlob:
ilon_new = np.mod(ilon_new+nlon, nlon)
else:
#
# Create a masked array, nulling out where values meet conditions
#
ilon_new = ilon_new[np.where( (ilon_new < int(nlon)) & (ilon_new >= 0) )]
#
# Now create a copy of ilon_new and replace non null values with ilat
jlat_new = np.zeros(np.shape(ilon_new), int) + ilat
try:
mout_new[jlat_new, ilon_new] = mout_new[jlat_new, ilon_new] + 1
nmap_new[jlat_new, ilon_new] = nmap_new[jlat_new, ilon_new] + 1
except (RuntimeError, TypeError, NameError, IndexError) as e:
                err += 1
nmap = nmap_new
mout = mout_new
if err != 0:
print(prom + 'WARNING: {} samples not included in map'.format(err))
sys.stdout.flush()
ind_null = np.where(nmap == 0)
if len(ind_null[0]) > 0:
mout[ind_null] = null # set zero-count bins to NULL
w = np.where(nmap != 0)
    if len(w[0]) > 0:
mout[w] = mout[w]/nmap[w] #Average map values
print(prom + "Took {} to complete".format(dt.now() - st))
return mout, mlat, mlon, nmap, ind_null
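#
# Minimal demonstration of getmap (not called anywhere in the script): bins a
# handful of hypothetical ground-track points into a 1-degree coverage map.
#
def _getmap_example():
    demo_lat = np.array([10.2, 10.4, 10.6])
    demo_lon = np.array([45.1, 45.2, 45.3])
    demo_val = np.ones(3)
    return getmap(2, demo_val, demo_lat, demo_lon, 1.0, 1.0,
                  limit=np.array([0., -88., 360., 88.]))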
def maparea(mp, rng=False, limit=np.array([0., -90., 360., 90]), infile=False, lats=False, null=np.nan):
#############################################################################
# maparea.pro
#
# Modifications history:
# 2003 jun 05 Than Putzig, created from mapbin.pro
# 2007 Feb 09 Than Putzig, updated to allow read from extant array
# 2011 Nov 08 Than Putzig, to output mean values of map
# 2014 Feb 27 Than Putzig, to allow non-global maps
# 2017 Sep 29 Matt Perry, ported to python 3+
# 2017 Oct 27 Matt Perry, updated area calculation for quicker processing
#
# Reads a map from a file or an array and determines the area of
# coverage for values in a given inclusive range over a specified latitude
# range.
#
#############################################################################
prom = "[MAPAREA]: "
if not rng:
print(prom + "You must specify a range")
rap = -1
sys.stdout.flush()
return rap
print(prom + "Calculating coverage percentage")
sys.stdout.flush()
if len(limit) != 4:
limit = np.array([0., -90., 360., 90])
else:
limit = np.array(limit)
if not lats:
lats = np.array([limit[1], limit[3]])
if len(lats) != 2:
lats = np.array([lats, lats])
#
# Set variable name
#
# Restore previous map and create corresponding lat/lon maps
#
if type(infile) is str:
if os.path.isfile(infile):
print(prom + "This function is not yet available.")
sys.stdout.flush()
sys.exit()
else:
print(prom + "File does not exist")
print(prom + "This function is not yet available.")
sys.stdout.flush()
r = 3396. # mean equatorial radius of Mars
a = 4.0*np.pi*np.power(r,2.)
print(prom + "Nominal surface area of Mars is {} sq. km..".format(str(a)))
sys.stdout.flush()
sz = np.shape(mp)
nlon = sz[1]
nlat = sz[0]
binlon = (limit[2] - limit[0])/nlon
binlat = (limit[3] - limit[1])/nlat
jmin = int((lats[0] - limit[1])/binlat)
jmax = int((lats[1] - limit[1])/binlat)
area = 0.0
atot = 0.0
valu = 0.0
vtot = 0.0
t_start = dt.now()
for jj in range(jmin, jmax):
binarea = binlon * binlat * np.cos((jj * binlat + limit[1] + binlat / 2.) * np.pi/180.) * \
np.power((2. * np.pi * r / 360.), 2.)
bins = np.nansum(mp[jj,:]) # Get number of filled bins
area = area + binarea*bins # Area of filled bins
atot = atot + binarea*len(mp[jj,:]); #Total area of latitude row
vtot = vtot + binarea*bins
#
# Print statements
#
if np.mod(jj, int(jmax/10)) == 0:
print(prom + "Area S of latitude {} is {}".format(str(jj*binlat+limit[1]), str(area)))
sys.stdout.flush()
if area > 0:
print(prom + "Mean value S of latitude {} is {}".format(str(jj * binlat + limit[1]), str(vtot/area)))
sys.stdout.flush()
rap = 100*area/atot
#
# Print Results
#
print(prom + 'Area of data values between {} and {} is {} sq km'.format(rng[0], rng[1], area))
print(prom + 'Total area between latitudes {} and {} is {} sq km'.format(lats[0], lats[1], atot))
print(prom + 'Relative area is {0:.2f}%'.format(rap))
print(prom + 'Mean value scaled to bin sizes is {}'.format(vtot/area))
print(prom + '... done.')
print(prom + 'Total Time {}'.format(dt.now() - t_start))
sys.stdout.flush()
return rap
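#
# Illustrative usage sketch (not executed): report the covered fraction of a
# coverage map (1 = covered, NaN = empty) between 60N and 88N; the inputs are
# hypothetical.
#
#   pct = maparea(cov_map, rng=[1, 1], limit=[0., -88., 360., 88.],
#                 lats=[60., 88.])
#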
def custom_color_tables(table):
color_dir = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/color_tables/'
#
# Custom color table
nlat = 176
avelat = 176
#
if table == 'MOLA':
name = 'MOLA_standard_cmap2.npy'
cmap = np.load(color_dir + name)
cmap = ListedColormap(cmap, name=name,N=None)
else:
cmap = {'red': ((0.0, 0.0, 0.0),
(0.10, 0.0, 0.0),
(0.23, 0.0, 0.0),
(0.33, 0.0, 0.0),
(0.4, 1.0, 1.0),
(0.50, 1.0, 1.0),
(0.70, 0.5, 0.5),
(1.00, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.10, 0.0, 0.0),
(0.23, 1.0, 1.0),
(0.33, 1.0, 1.0),
(0.4, 1.0, 1.0),
(0.50, 0.0, 0.0),
(0.70, 0.30, 0.30),
(1.00, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.10, 1.0, 1.0),
(0.23, 1.0, 1.0),
(0.33, 0.0, 0.0),
(0.4, 0.0, 0.0),
(0.50, 0.0, 0.0),
(0.70, 0.25, 0.25),
(1.00, 1.0, 1.0))}
cmap = LinearSegmentedColormap('TI',cmap,256)
return cmap
def plotsize(limit, mres, tclat):
#
# Calculate the plot size in inches
#
slat = (limit[3] - limit[1]) * mres / 75 / 2.54
if tclat == "":
slon = (limit[2] - limit[0]) * mres / 75 / 2.54
xsize = np.max([slon, 6.5]) + 2
ysize = slat * (xsize - 2) / slon + 3
else:
xsize = np.max([slat, 3.25]) * 2 + 2
ysize = xsize + 1
return xsize, ysize
def polar_stere(lon_w, lon_e, lat_s, lat_n, **kwargs):
'''Returns a Basemap object (NPS/SPS) focused in a region.
lon_w, lon_e, lat_s, lat_n -- Graphic limits in geographical coordinates.
W and S directions are negative.
**kwargs -- Aditional arguments for Basemap object.
'''
lon_0 = lon_w + (lon_e - lon_w) / 2.
ref = lat_s if abs(lat_s) > abs(lat_n) else lat_n
lat_0 = np.copysign(90., ref)
proj = 'npstere' if lat_0 > 0 else 'spstere'
prj = Basemap(projection=proj, lon_0=lon_0, lat_0=lat_0,
boundinglat=0, resolution='c')
lons = [lon_w, lon_e, lon_w, lon_e, lon_0, lon_0]
lats = [lat_s, lat_s, lat_n, lat_n, lat_s, lat_n]
x, y = prj(lons, lats)
ll_lon, ll_lat = prj(min(x), min(y), inverse=True)
ur_lon, ur_lat = prj(max(x), max(y), inverse=True)
return Basemap(projection='stere', lat_0=lat_0, lon_0=lon_0,
llcrnrlon=ll_lon, llcrnrlat=ll_lat,
urcrnrlon=ur_lon, urcrnrlat=ur_lat, **kwargs)
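#
# Illustrative usage sketch (not executed): build a polar stereographic
# Basemap clipped to 0-90E, 70-88N (hypothetical limits).
#
#   bmap = polar_stere(0., 90., 70., 88., resolution=None)
#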
import os, socket
import MySQLdb as SQL# MySQL Interface Library
import _mysql_exceptions
def dbreadconf(config_file):
prom = "[DB_READ_CONFIG]: "
if os.path.isfile(config_file):
with open(config_file) as f:
temp = f.read()
temp = temp.splitlines()
user = str(temp[0])
password = str(temp[1])
database = str(temp[2])
return user, password, database
else:
print(prom + "Database configuration file not found.")
print(prom + "Please ensure the path to the file is correct and try again.")
print(prom + "Exiting...")
return
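#
# The configuration file read above is assumed to hold three plain-text lines:
# user name, password and database name. An illustrative (fake) example:
#
#   sharad_user
#   not_a_real_password
#   sharad_db
#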
def dbconnect(user, password, database):
prom = "[DB_CONNECT]: "
hname = socket.gethostname()
if hname == 'hachiwara':
host = 'localhost'
else:
host = 'hachiwara'
# This comment is to test something cool
try:
db = SQL.connect(host=host, user=user, passwd=password, db=database)
return db
except SQL.Error as err:
print(prom + "Error in Connection")
print(err)
print(prom + "Exiting")
return
def SHARAD_Coverage(mapname, orng=np.array([829,100000]), srng = np.array([0., -88., 360., 88.]), orbitids = '', orbittrig = False, roll = 99., rcri = '<', nomap = 0, foldmap = 0, elevmap = 0, replot = 0, update = 0, clat = 0, cset = 0, shresc = 3.0, shresi = 0.750, backgrounddata='MOLA'):
#
# Set Directories
#
config_file = '/data/d/WUSHARPS/Support/SHARAD_Coverage/data/dbconfig.txt'
outDir = '/home/mperry/Applications/SHARAD_Coverage/output/'
#
# Global Variables
#
global warn1, warn2, warn3, warn4
#
# Constants
#
startTime = dt.now()
missions = ['PSP', 'ESP', 'EM1', 'EM2', 'EM3', 'EM4', 'EM5']
prog = "SHARAD_coverage"
vers = 0.1
null = np.nan
shresc = 3.0 # SHARAD crossline resolution in km
shresi = 0.750 # SHARAD inline resolution in km
erad = 3396 # Mars equatorial radius in km
    d2r = np.pi / 180. # Degrees to radians
escl = erad * d2r #Equatorial scale in km/deg
kpd = 59. # kilometers per degree on Mars
shres = int(round(escl/shresc)) # SHARAD res. in pixels/degree at the equator (rounded to nearest pixel)
latbuf = 50 # 50 km latitude buffer for searches (per conversation with Than)
#
# Check input variables
#
mapbase = str(mapname)
if not os.path.isdir(outDir):
os.mkdir(outDir)
log_fname = outDir + mapbase + ".log"
sys.stdout = open(log_fname, 'w')
sys.stderr = sys.stdout
bg_data = 'MOLA' if backgrounddata == 'MOLA' else backgrounddata
    if update:
        f_update = update
        if not os.path.isfile(f_update):
            print("ERROR: File with which to update does not exist in the current path.")
            print("Please move the file to the local directory or specify the file's full path")
            print("Exiting")
            sys.stdout.flush()
            sys.exit()
return
| gpl-3.0 |
sorgerlab/indra | models/p53_model/param_analysis.py | 6 | 1958 | import numpy
import matplotlib.pyplot as plt
from indra.util import plot_formatting as pf
# Import ATM models
from ATM_v1 import model as ATM_v1
from ATM_v2 import model as ATM_v2
from ATM_v3 import model as ATM_v3
from ATM_v4a import model as ATM_v4a
from ATM_v4b import model as ATM_v4b
# Import ATR models
from ATR_v1 import model as ATR_v1
from ATR_v2 import model as ATR_v2
from ATR_v3 import model as ATR_v3
from run_p53_model import run_model
from pysb.bng import generate_equations
from pysb.simulator import ScipyOdeSimulator as Solver
def sample_params(mu, sigma):
r = numpy.random.randn(len(mu))
p = numpy.power(10, mu + r*sigma)
return p
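# Illustrative example (not executed): draw one log-normal perturbation of a
# nominal parameter vector, where mu holds log10 of the nominal values.
#
#   mu = numpy.log10([1e-6, 1e-3, 0.1])
#   p = sample_params(mu, 0.05)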
def parameter_sweep(model, sigma, ns):
generate_equations(model)
logp = [numpy.log10(p.value) for p in model.parameters]
ts = numpy.linspace(0, 20*3600, 20*60)
solver = Solver(model, ts)
pf.set_fig_params()
plt.figure(figsize=(1.8, 1), dpi=300)
for i in range(ns):
psample = sample_params(logp, 0.05)
res = solver.run(param_values=psample)
signal = res.observables['p53_active']
plt.plot(signal, color=(0.7, 0.7, 0.7), alpha=0.3)
# Highlighted
colors = ['g', 'y', 'c']
for c in colors:
psample = sample_params(logp, 0.05)
res = solver.run(param_values=psample)
signal = res.observables['p53_active']
plt.plot(signal, c)
# Nominal
solver = Solver(model, ts)
res = solver.run()
signal = res.observables['p53_active']
plt.plot(signal, 'r')
plt.xticks([])
plt.xlabel('Time (a.u.)', fontsize=7)
plt.ylabel('Active p53', fontsize=7)
plt.yticks([])
plt.ylim(ymin=0)
pf.format_axis(plt.gca())
plt.savefig(model.name + '_sample.pdf')
if __name__ == '__main__':
models = [ATM_v1, ATM_v2, ATM_v3, ATM_v4a, ATM_v4b,
ATR_v1, ATR_v2, ATR_v3]
for model in models:
print(model.name)
parameter_sweep(model, 1, 50)
| bsd-2-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/jupyter_core/tests/dotipython_empty/profile_default/ipython_kernel_config.py | 24 | 15358 | # Configuration file for ipython-kernel.
c = get_config()
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp, ConnectionFileMixin
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# Reraise exceptions encountered loading IPython extensions?
# c.IPKernelApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# The IPython profile to use.
# c.IPKernelApp.profile = 'default'
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = ''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
#
# c.IPKernelApp.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = <class 'IPython.kernel.zmq.ipkernel.IPythonKernel'>
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPKernelApp.ip = ''
#------------------------------------------------------------------------------
# IPythonKernel configuration
#------------------------------------------------------------------------------
# IPythonKernel will inherit config from: Kernel
#
# c.IPythonKernel._execute_sleep = 0.0005
# Whether to use appnope for compatibility with OS X App Nap.
#
# Only affects OS X >= 10.9.
# c.IPythonKernel._darwin_app_nap = True
#
# c.IPythonKernel._poll_interval = 0.05
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.history_length = 10000
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQInteractiveShell.display_page = False
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.quiet = False
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
# The part of the banner to be printed before the profile
# c.ZMQInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'LightBG'
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.separate_out2 = ''
# The part of the banner to be printed after the profile
# c.ZMQInteractiveShell.banner2 = ''
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQInteractiveShell.logappend = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'minrk'
# Debug output in the Session
# c.Session.debug = False
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
| gpl-3.0 |
deepchem/deepchem | examples/delaney/delaney_krr.py | 6 | 1169 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 23 15:04:19 2017
@author: zqwu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from sklearn.kernel_ridge import KernelRidge
import numpy as np
import deepchem as dc
import tempfile
# Only for debug!
np.random.seed(123)
# Load Delaney dataset
n_features = 1024
delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney()
train_dataset, valid_dataset, test_dataset = delaney_datasets
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
def model_builder(model_dir):
sklearn_model = KernelRidge(kernel="rbf", alpha=1e-3, gamma=0.05)
return dc.models.SklearnModel(sklearn_model, model_dir)
model_dir = tempfile.mkdtemp()
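# SingletaskToMultitask builds one scikit-learn model per task via
# model_builder and presents them behind a single multitask interface.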
model = dc.models.SingletaskToMultitask(delaney_tasks, model_builder, model_dir)
model.fit(train_dataset)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| mit |
aetilley/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
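    # With split_sign the transform returns the positive and negative parts of
    # the code stacked side by side, so subtracting the second half from the
    # first recovers the original signed code.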
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
jaidevd/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where the nodes
represent the stocks, and where:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
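# Rescaling the precision (inverse covariance) matrix by the square root of
# its diagonal gives, up to sign, the partial correlations; only entries with
# absolute value above 0.02 are kept as edges to draw.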
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
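# Heuristic: look at the nearest neighbor of each node along each axis and
# push the label away from it, picking the text alignment and a small offset
# accordingly.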
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
ClimbsRocks/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
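# A KernelRidge with a linear kernel fit on raw features is equivalent to an
# intercept-free Ridge fit on the same data, which is what most tests below
# check by comparing predictions.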
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
xiaolvmu/flounder-kernel | scripts/tracing/dma-api/plotting.py | 96 | 4043 | """Ugly graph drawing tools"""
import matplotlib.pyplot as plt
import matplotlib.cm as cmap
#import numpy as np
from matplotlib import cbook
# http://stackoverflow.com/questions/4652439/is-there-a-matplotlib-equivalent-of-matlabs-datacursormode
class DataCursor(object):
"""A simple data cursor widget that displays the x,y location of a
matplotlib artist when it is selected."""
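    # Typical use (sketch): create the cursor after plotting, e.g.
    #   lines = ax.step(x, y); DataCursor(lines)
    # then clicking within `tolerance` points of an artist pops up an
    # annotation with the clicked coordinates (see plotseries below).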
def __init__(self, artists, tolerance=5, offsets=(-20, 20),
template='x: %0.2f\ny: %0.2f', display_all=False):
"""Create the data cursor and connect it to the relevant figure.
"artists" is the matplotlib artist or sequence of artists that will be
selected.
"tolerance" is the radius (in points) that the mouse click must be
within to select the artist.
"offsets" is a tuple of (x,y) offsets in points from the selected
point to the displayed annotation box
"template" is the format string to be used. Note: For compatibility
with older versions of python, this uses the old-style (%)
formatting specification.
"display_all" controls whether more than one annotation box will
be shown if there are multiple axes. Only one will be shown
per-axis, regardless.
"""
self.template = template
self.offsets = offsets
self.display_all = display_all
if not cbook.iterable(artists):
artists = [artists]
self.artists = artists
self.axes = tuple(set(art.axes for art in self.artists))
self.figures = tuple(set(ax.figure for ax in self.axes))
self.annotations = {}
for ax in self.axes:
self.annotations[ax] = self.annotate(ax)
for artist in self.artists:
artist.set_picker(tolerance)
for fig in self.figures:
fig.canvas.mpl_connect('pick_event', self)
def annotate(self, ax):
"""Draws and hides the annotation box for the given axis "ax"."""
annotation = ax.annotate(self.template, xy=(0, 0), ha='right',
xytext=self.offsets, textcoords='offset points', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
)
annotation.set_visible(False)
return annotation
def __call__(self, event):
"""Intended to be called through "mpl_connect"."""
# Rather than trying to interpolate, just display the clicked coords
# This will only be called if it's within "tolerance", anyway.
x, y = event.mouseevent.xdata, event.mouseevent.ydata
try:
annotation = self.annotations[event.artist.axes]
except KeyError:
return
if x is not None:
if not self.display_all:
# Hide any other annotation boxes...
for ann in self.annotations.values():
ann.set_visible(False)
# Update the annotation in the current axis..
annotation.xy = x, y
annotation.set_text(self.template % (x, y))
annotation.set_visible(True)
event.canvas.draw()
def plotseries(*serieslabels):
"""Plot lists of series in separate axes, tie time axis together"""
global fig
fig, axes = plt.subplots(nrows=len(serieslabels), sharex=True)
for subplot, ax in zip(serieslabels, axes):
for ser, lab in zip(*subplot): # subplot = ([x], [y])
ax.step(ser[0], ser[1], label=lab, where="post")
ax.grid(True)
ax.legend()
(DataCursor(ax.lines))
plt.grid(True)
plt.show()
def disp_pic(bitmap):
"""Display the allocation bitmap. TODO."""
fig=plt.figure()
a=fig.add_subplot(1,1,1)
fig.clf()
implt=plt.imshow(bitmap, extent=(0, len(bitmap[0]), 0, len(bitmap)),
interpolation="nearest", cmap=cmap.gist_heat)
fig.canvas.draw()
plt.show()
| gpl-2.0 |
shyamalschandra/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
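    # get_blas_info() reports the BLAS libraries and compile flags detected at
    # build time; the bundled CBLAS headers in ../src/cblas are always added
    # to the include path next to numpy's headers.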
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
adykstra/mne-python | tutorials/misc/plot_ecog.py | 3 | 2827 | """
======================
Working with ECoG data
======================
MNE supports working with more than just MEG and EEG data. Here we show some
of the functions that can be used to facilitate working with
electrocorticography (ECoG) data.
"""
# Authors: Eric Larson <[email protected]>
# Chris Holdgraf <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from mayavi import mlab
import mne
from mne.viz import plot_alignment, snapshot_brain_montage
print(__doc__)
###############################################################################
# Let's load some ECoG electrode locations and names, and turn them into
# a :class:`mne.channels.DigMontage` class.
mat = loadmat(mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat')
ch_names = mat['ch_names'].tolist()
elec = mat['elec'] # electrode positions given in meters
dig_ch_pos = dict(zip(ch_names, elec))
mon = mne.channels.DigMontage(dig_ch_pos=dig_ch_pos)
print('Created %s channel positions' % len(ch_names))
###############################################################################
# Now that we have our electrode positions in MRI coordinates, we can create
# our measurement info structure.
info = mne.create_info(ch_names, 1000., 'ecog', montage=mon)
###############################################################################
# We can then plot the locations of our electrodes on our subject's brain.
#
# .. note:: These are not real electrodes for this subject, so they
# do not align to the cortical surface perfectly.
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
surfaces=['pial'])
mlab.view(200, 70)
###############################################################################
# Sometimes it is useful to make a scatterplot for the current figure view.
# This is best accomplished with matplotlib. We can capture an image of the
# current mayavi view, along with the xy position of each electrode, with the
# `snapshot_brain_montage` function.
# We'll once again plot the surface, then take a snapshot.
fig_scatter = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
surfaces='pial')
mlab.view(200, 70)
xy, im = snapshot_brain_montage(fig_scatter, mon)
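# ``xy`` maps each channel name to its 2D position (in pixels) within the
# snapshot image ``im``, so the electrodes can be drawn on top of it with
# ordinary matplotlib commands.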
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in info['ch_names']])
# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])
# This allows us to use matplotlib to create arbitrary 2d scatterplots
_, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
plt.show()
| bsd-3-clause |
MJuddBooth/pandas | pandas/tests/series/test_apply.py | 1 | 25098 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from collections import Counter, OrderedDict, defaultdict
from itertools import chain
import numpy as np
import pytest
import pandas.compat as compat
from pandas.compat import lrange
import pandas as pd
from pandas import DataFrame, Index, Series, isna
from pandas.conftest import _get_cython_table_params
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
class TestSeriesApply():
def test_apply(self, datetime_series):
with np.errstate(all='ignore'):
tm.assert_series_equal(datetime_series.apply(np.sqrt),
np.sqrt(datetime_series))
# element-wise apply
import math
tm.assert_series_equal(datetime_series.apply(math.exp),
np.exp(datetime_series))
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
assert s is not rs
assert s.index is rs.index
assert s.dtype == rs.dtype
assert s.name == rs.name
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
assert result.dtype == object
def test_with_string_args(self, datetime_series):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = datetime_series.apply(arg)
expected = getattr(datetime_series, arg)()
assert result == expected
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',', ))
assert result[0] == ['foo', 'bar']
assert isinstance(result[0], list)
def test_series_map_box_timestamps(self):
# GH#2689, GH#2627
ser = Series(pd.date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
ser.map(func)
ser.apply(func)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns]'
# boxed value must be Timestamp instance
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
assert s.dtype == 'timedelta64[ns]'
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
assert s.dtype == 'Period[M]'
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
def test_apply_dict_depr(self):
tsdf = pd.DataFrame(np.random.randn(10, 3),
columns=['A', 'B', 'C'],
index=pd.date_range('1/1/2000', periods=10))
with tm.assert_produces_warning(FutureWarning):
tsdf.A.agg({'foo': ['sum', 'mean']})
@pytest.mark.parametrize('series', [
['1-1', '1-1', np.NaN],
['1-1', '1-2', np.NaN]])
def test_apply_categorical_with_nan_values(self, series):
# GH 20714 bug fixed in: GH 24275
s = pd.Series(series, dtype='category')
result = s.apply(lambda x: x.split('-')[0])
result = result.astype(object)
expected = pd.Series(['1', '1', np.NaN], dtype='category')
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
class TestSeriesAggregate():
def test_transform(self, string_series):
# transforming functions
with np.errstate(all='ignore'):
f_sqrt = np.sqrt(string_series)
f_abs = np.abs(string_series)
# ufunc
result = string_series.transform(np.sqrt)
expected = f_sqrt.copy()
assert_series_equal(result, expected)
result = string_series.apply(np.sqrt)
assert_series_equal(result, expected)
# list-like
result = string_series.transform([np.sqrt])
expected = f_sqrt.to_frame().copy()
expected.columns = ['sqrt']
assert_frame_equal(result, expected)
result = string_series.transform([np.sqrt])
assert_frame_equal(result, expected)
result = string_series.transform(['sqrt'])
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both functions per
# series and then concatting
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ['sqrt', 'absolute']
result = string_series.apply([np.sqrt, np.abs])
assert_frame_equal(result, expected)
result = string_series.transform(['sqrt', 'abs'])
expected.columns = ['sqrt', 'abs']
assert_frame_equal(result, expected)
# dict, provide renaming
expected = pd.concat([f_sqrt, f_abs], axis=1)
expected.columns = ['foo', 'bar']
expected = expected.unstack().rename('series')
result = string_series.apply({'foo': np.sqrt, 'bar': np.abs})
assert_series_equal(result.reindex_like(expected), expected)
def test_transform_and_agg_error(self, string_series):
# we are trying to transform with an aggregator
with pytest.raises(ValueError):
string_series.transform(['min', 'max'])
with pytest.raises(ValueError):
with np.errstate(all='ignore'):
string_series.agg(['sqrt', 'max'])
with pytest.raises(ValueError):
with np.errstate(all='ignore'):
string_series.transform(['sqrt', 'max'])
with pytest.raises(ValueError):
with np.errstate(all='ignore'):
string_series.agg({'foo': np.sqrt, 'bar': 'sum'})
def test_demo(self):
# demonstration tests
s = Series(range(6), dtype='int64', name='series')
result = s.agg(['min', 'max'])
expected = Series([0, 5], index=['min', 'max'], name='series')
tm.assert_series_equal(result, expected)
result = s.agg({'foo': 'min'})
expected = Series([0], index=['foo'], name='series')
tm.assert_series_equal(result, expected)
# nested renaming
with tm.assert_produces_warning(FutureWarning):
result = s.agg({'foo': ['min', 'max']})
expected = DataFrame(
{'foo': [0, 5]},
index=['min', 'max']).unstack().rename('series')
tm.assert_series_equal(result, expected)
def test_multiple_aggregators_with_dict_api(self):
s = Series(range(6), dtype='int64', name='series')
# nested renaming
with tm.assert_produces_warning(FutureWarning):
result = s.agg({'foo': ['min', 'max'], 'bar': ['sum', 'mean']})
expected = DataFrame(
{'foo': [5.0, np.nan, 0.0, np.nan],
'bar': [np.nan, 2.5, np.nan, 15.0]},
columns=['foo', 'bar'],
index=['max', 'mean',
'min', 'sum']).unstack().rename('series')
tm.assert_series_equal(result.reindex_like(expected), expected)
def test_agg_apply_evaluate_lambdas_the_same(self, string_series):
# test that we are evaluating row-by-row first
# before vectorized evaluation
result = string_series.apply(lambda x: str(x))
expected = string_series.agg(lambda x: str(x))
tm.assert_series_equal(result, expected)
result = string_series.apply(str)
expected = string_series.agg(str)
tm.assert_series_equal(result, expected)
def test_with_nested_series(self, datetime_series):
# GH 2316
# .agg with a reducer and a transform, what to do
result = datetime_series.apply(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
expected = DataFrame({'x': datetime_series,
'x^2': datetime_series ** 2})
tm.assert_frame_equal(result, expected)
result = datetime_series.agg(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
tm.assert_frame_equal(result, expected)
def test_replicate_describe(self, string_series):
# this also tests a result set that is all scalars
expected = string_series.describe()
result = string_series.apply(OrderedDict(
[('count', 'count'),
('mean', 'mean'),
('std', 'std'),
('min', 'min'),
('25%', lambda x: x.quantile(0.25)),
('50%', 'median'),
('75%', lambda x: x.quantile(0.75)),
('max', 'max')]))
assert_series_equal(result, expected)
def test_reduce(self, string_series):
# reductions with named functions
result = string_series.agg(['sum', 'mean'])
expected = Series([string_series.sum(),
string_series.mean()],
['sum', 'mean'],
name=string_series.name)
assert_series_equal(result, expected)
def test_non_callable_aggregates(self):
# test agg using non-callable series attributes
s = Series([1, 2, None])
# Calling agg w/ just a string arg same as calling s.arg
result = s.agg('size')
expected = s.size
assert result == expected
# test when mixed w/ callable reducers
result = s.agg(['size', 'count', 'mean'])
expected = Series(OrderedDict([('size', 3.0),
('count', 2.0),
('mean', 1.5)]))
assert_series_equal(result[expected.index], expected)
@pytest.mark.parametrize("series, func, expected", chain(
_get_cython_table_params(Series(), [
('sum', 0),
('max', np.nan),
('min', np.nan),
('all', True),
('any', False),
('mean', np.nan),
('prod', 1),
('std', np.nan),
('var', np.nan),
('median', np.nan),
]),
_get_cython_table_params(Series([np.nan, 1, 2, 3]), [
('sum', 6),
('max', 3),
('min', 1),
('all', True),
('any', True),
('mean', 2),
('prod', 6),
('std', 1),
('var', 1),
('median', 2),
]),
_get_cython_table_params(Series('a b c'.split()), [
('sum', 'abc'),
('max', 'c'),
('min', 'a'),
('all', 'c'), # see GH12863
('any', 'a'),
]),
))
def test_agg_cython_table(self, series, func, expected):
# GH21224
# test reducing functions in
# pandas.core.base.SelectionMixin._cython_table
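        # _get_cython_table_params expands each (name, expected) pair with the
        # numpy/builtin callables that map to the same entry in _cython_table,
        # so both spellings are exercised.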
result = series.agg(func)
if tm.is_number(expected):
assert np.isclose(result, expected, equal_nan=True)
else:
assert result == expected
@pytest.mark.parametrize("series, func, expected", chain(
_get_cython_table_params(Series(), [
('cumprod', Series([], Index([]))),
('cumsum', Series([], Index([]))),
]),
_get_cython_table_params(Series([np.nan, 1, 2, 3]), [
('cumprod', Series([np.nan, 1, 2, 6])),
('cumsum', Series([np.nan, 1, 3, 6])),
]),
_get_cython_table_params(Series('a b c'.split()), [
('cumsum', Series(['a', 'ab', 'abc'])),
]),
))
def test_agg_cython_table_transform(self, series, func, expected):
# GH21224
# test transforming functions in
# pandas.core.base.SelectionMixin._cython_table (cumprod, cumsum)
result = series.agg(func)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("series, func, expected", chain(
_get_cython_table_params(Series('a b c'.split()), [
('mean', TypeError), # mean raises TypeError
('prod', TypeError),
('std', TypeError),
('var', TypeError),
('median', TypeError),
('cumprod', TypeError),
])
))
def test_agg_cython_table_raises(self, series, func, expected):
# GH21224
with pytest.raises(expected):
# e.g. Series('a b'.split()).cumprod() will raise
series.agg(func)
class TestSeriesMap():
def test_map(self, datetime_series):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
merged = target.map(source)
for k, v in compat.iteritems(merged):
assert v == source[target[k]]
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
assert v == source[target[k]]
# function
result = datetime_series.map(lambda x: x * 2)
tm.assert_series_equal(result, datetime_series * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
tm.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series([1, 2, 3, 4],
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series(['B', 'C', 'D', 'E'], dtype='category',
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
exp = Series(pd.Categorical([np.nan, 'B', 'C', 'D'],
categories=['B', 'C', 'D', 'E']))
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 'B', 'C', 'D'])
tm.assert_series_equal(a.map(c), exp)
@pytest.mark.parametrize("index", tm.all_index_generator(10))
def test_map_empty(self, index):
s = Series(index)
result = s.map({})
expected = pd.Series(np.nan, index=s.index)
tm.assert_series_equal(result, expected)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: 'foo', False: 'bar'})
expected = Series(['foo', 'foo', 'bar'], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
assert left.dtype == np.float_
assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
assert merged.dtype == np.float_
assert isna(merged['d'])
assert not isna(merged['c'])
def test_map_type_inference(self):
s = Series(lrange(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(self, string_series):
from decimal import Decimal
result = string_series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action='ignore')
exp = s * 2
assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
"""
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
# GH 18496
df = pd.DataFrame({'a': [(1, ), (2, ), (3, 4), (5, 6)]})
label_mappings = {(1, ): 'A', (2, ): 'B', (3, 4): 'A', (5, 6): 'B'}
df['labels'] = df['a'].map(label_mappings)
df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df['labels'], df['expected_labels'],
check_names=False)
def test_map_counter(self):
s = Series(['a', 'b', 'c'], index=[1, 2, 3])
counter = Counter()
counter['b'] = 5
counter['c'] += 1
result = s.map(counter)
expected = Series([0, 5, 1], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_defaultdict(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
default_dict = defaultdict(lambda: 'blank')
default_dict[1] = 'stuff'
result = s.map(default_dict)
expected = Series(['stuff', 'blank', 'blank'], index=['a', 'b', 'c'])
assert_series_equal(result, expected)
def test_map_dict_subclass_with_missing(self):
"""
Test Series.map with a dictionary subclass that defines __missing__,
i.e. sets a default value (GH #15999).
"""
class DictWithMissing(dict):
def __missing__(self, key):
return 'missing'
s = Series([1, 2, 3])
dictionary = DictWithMissing({3: 'three'})
result = s.map(dictionary)
expected = Series(['missing', 'missing', 'three'])
assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing(self):
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: 'three'})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, 'three'])
assert_series_equal(result, expected)
def test_map_box(self):
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns]'
# boxed value must be Timestamp instance
res = s.map(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
res = s.map(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
assert s.dtype == 'timedelta64[ns]'
res = s.map(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
assert s.dtype == 'Period[M]'
res = s.map(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_map_categorical(self):
values = pd.Categorical(list('ABBABCD'), categories=list('DCBA'),
ordered=True)
s = pd.Series(values, name='XX', index=list('abcdefg'))
result = s.map(lambda x: x.lower())
exp_values = pd.Categorical(list('abbabcd'), categories=list('dcba'),
ordered=True)
exp = pd.Series(exp_values, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp_values)
result = s.map(lambda x: 'A')
exp = pd.Series(['A'] * 7, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action='ignore')
def test_map_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
# keep tz
result = s.map(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.map(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
with pytest.raises(NotImplementedError):
s.map(lambda x: x, na_action='ignore')
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize("vals,mapping,exp", [
(list('abc'), {np.nan: 'not NaN'}, [np.nan] * 3 + ['not NaN']),
(list('abc'), {'a': 'a letter'}, ['a letter'] + [np.nan] * 3),
(list(range(3)), {0: 42}, [42] + [np.nan] * 3)])
def test_map_missing_mixed(self, vals, mapping, exp):
# GH20495
s = pd.Series(vals + [np.nan])
result = s.map(mapping)
tm.assert_series_equal(result, pd.Series(exp))
| bsd-3-clause |
daler/hubward | hubward/utils.py | 2 | 14026 | import subprocess
import pybedtools
import os
import sys
import gzip
import zipfile
import pkg_resources
import tempfile
import string
import tarfile
import numpy as np
from os.path import join, dirname, isdir
from docutils.core import publish_string
import bleach
import pycurl
# The following license is from conda_build. Code from conda_build is used in
# the download, tar_xf, and unzip functions.
#
# ----------------------------------------------------------------------------
# Except where noted below, conda is released under the following terms:
#
# (c) 2012 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Continuum Analytics, Inc. nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CONTINUUM ANALYTICS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Exceptions
# ==========
#
# versioneer.py is Public Domain
# ----------------------------------------------------------------------------
def download(url, outfile):
with open(outfile, 'wb') as f:
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, f)
c.perform()
c.close()
def _tar_xf(tarball, dir_path, mode='r:*'):
# From conda_build, see license above.
if tarball.lower().endswith('.tar.z'):
uncompress = external.find_executable('uncompress')
if not uncompress:
sys.exit("""\
uncompress is required to unarchive .z source files.
""")
subprocess.check_call([uncompress, '-f', tarball])
tarball = tarball[:-2]
if tarball.endswith('.tar.xz'):
unxz = external.find_executable('unxz')
if not unxz:
sys.exit("""\
unxz is required to unarchive .xz source files.
""")
subprocess.check_call([unxz, '-f', '-k', tarball])
tarball = tarball[:-3]
t = tarfile.open(tarball, mode)
t.extractall(path=dir_path)
t.close()
def _unzip(zip_path, dir_path):
# From conda_build, see license above.
z = zipfile.ZipFile(zip_path)
for name in z.namelist():
if name.endswith('/'):
continue
path = join(dir_path, *name.split('/'))
dp = dirname(path)
if not isdir(dp):
os.makedirs(dp)
with open(path, 'wb') as fo:
fo.write(z.read(name))
z.close()
def make_executable(filename):
mode = os.stat(filename).st_mode
mode |= (mode & 292) >> 2
os.chmod(filename, mode)
def makedirs(dirnames):
"""
Recursively create the given directory or directories without reporting
errors if they are present.
"""
if isinstance(dirnames, str):
dirnames = [dirnames]
for dirname in dirnames:
if not os.path.exists(dirname):
os.makedirs(dirname)
def unpack(filename, dest):
if filename.lower().endswith(
('.tar.gz', '.tar.bz2', '.tgz', '.tar.xz', '.tar', 'tar.z')
):
_tar_xf(filename, dest)
elif filename.lower().endswith('.zip'):
_unzip(filename, dest)
def link_is_newer(x, y):
return os.lstat(x).st_mtime > os.lstat(y).st_mtime
def is_newer(x, y):
return os.stat(x).st_mtime > os.stat(y).st_mtime
def get_resource(fn, as_tempfile=False):
"""
Retrieve an installed resource.
If an installed resource can't be found, then assume we're working out of
the source directory in which case we can find the file in the ../resources
dir.
By default, returns a string. If as_tempfile=True, then write the string to
a tempfile and return that new filename. The caller is responsible for
deleting the tempfile.
"""
try:
s = pkg_resources.resource_string('hubward', fn)
except IOError:
s = open(os.path.join(
os.path.dirname(__file__), '..', 'resources', fn)).read()
if not as_tempfile:
return s
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, 'w') as fout:
fout.write(s)
return tmp
def reST_to_html(s):
"""
Convert ReST-formatted string `s` into HTML.
Output is intended for uploading to UCSC configuration pages, so this uses
a whitelist approach for HTML tags.
"""
html = publish_string(
source=s,
writer_name='html',
settings=None,
settings_overrides={'embed_stylesheet': False},
)
safe = bleach.ALLOWED_TAGS + [
'p', 'img', 'pre', 'tt', 'a', 'h1', 'h2', 'h3', 'h4'
]
attributes = {
'img': ['alt', 'src'],
'a': ['href'],
}
return bleach.clean(html, tags=safe, strip=True, attributes=attributes)
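# A small, hedged illustration of reST_to_html (not part of the original
# module): the ReST snippet below is made up, and the helper name
# _demo_rest_to_html is hypothetical. It simply shows that whitelisted tags
# survive the bleach pass while everything else is stripped.
def _demo_rest_to_html():
    rst = (
        "Track description\n"
        "=================\n"
        "\n"
        "See `UCSC <http://genome.ucsc.edu>`_ for details."
    )
    return reST_to_html(rst)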
def sanitize(s, strict=False):
"""
If strict, only allow letters and digits -- spaces will be stripped.
Otherwise, convert spaces to underscores.
"""
if strict:
        allowed = string.ascii_letters + string.digits
    else:
        allowed = string.ascii_letters + string.digits + ' '
return ''.join([i for i in s if i in allowed]).replace(' ', '_')
# copied over from metaseq.colormap_adjust to avoid pulling in all of
# metaseq...
def smart_colormap(vmin, vmax, color_high='#b11902', hue_low=0.6):
"""
Creates a "smart" colormap that is centered on zero, and accounts for
asymmetrical vmin and vmax by matching saturation/value of high and low
colors.
It works by first creating a colormap from white to `color_high`. Setting
this color to the max(abs([vmin, vmax])), it then determines what the color
of min(abs([vmin, vmax])) should be on that scale. Then it shifts the
    color to the new hue `hue_low`, and finally creates a new colormap with the
    new hue-shifted color as the low end, `color_high` as the max, and centered
    on zero.
Parameters
----------
color_high : color
Can be any format supported by matplotlib. Try "#b11902" for a nice
red.
hue_low : float in [0, 1]
Try 0.6 for a nice blue
vmin : float
Lowest value in data you'll be plotting
vmax : float
Highest value in data you'll be plotting
"""
import matplotlib
import colorsys
# first go from white to color_high
orig_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
'test', ['#FFFFFF', color_high], N=2048)
# For example, say vmin=-3 and vmax=9. If vmin were positive, what would
# its color be?
vmin = float(vmin)
vmax = float(vmax)
mx = max([vmin, vmax])
mn = min([vmin, vmax])
frac = abs(mn / mx)
rgb = orig_cmap(frac)[:-1]
# Convert to HSV and shift the hue
hsv = list(colorsys.rgb_to_hsv(*rgb))
hsv[0] = hue_low
new_rgb = colorsys.hsv_to_rgb(*hsv)
new_hex = matplotlib.colors.rgb2hex(new_rgb)
zeropoint = -vmin / (vmax - vmin)
# Create a new colormap using the new hue-shifted color as the low end
new_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
'test', [(0, new_rgb), (zeropoint, '#FFFFFF'), (1, color_high)],
N=2048)
return new_cmap
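# Hedged usage sketch for smart_colormap (illustrative only; the helper name
# _demo_smart_colormap and the random data are assumptions, not part of the
# module). It shows the colormap staying centered on zero even though the
# value range (-3, 9) is asymmetric.
def _demo_smart_colormap():
    import numpy as np
    import matplotlib.pyplot as plt
    data = np.random.uniform(-3, 9, size=(20, 20))
    cmap = smart_colormap(vmin=data.min(), vmax=data.max())
    plt.imshow(data, cmap=cmap, vmin=data.min(), vmax=data.max())
    plt.colorbar()
    plt.show()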
def fix_macs_wig(fn, genome, output=None, add_chr=False, to_ignore=None):
"""
    wig files created by MACS are often extended outside the chromosome ranges.
This function edits an input WIG file to fit within the chromosome
boundaries defined by `genome`.
If `add_chr` is True, then prefix each chromosome name with "chr".
Also gets rid of any track lines so the file is ready for conversion to
bigWig.
Returns the output filename.
fn : str
Input WIG filename. Can be gzipped, if extension ends in .gz.
genome : str or dict
output : str or None
If None, writes to temp file
to_ignore : list
List of chromosomes to ignore.
"""
if output is None:
output = pybedtools.BedTool._tmp()
if to_ignore is None:
to_ignore = []
genome = pybedtools.chromsizes(genome)
with open(output, 'w') as fout:
if fn.endswith('.gz'):
f = gzip.open(fn)
else:
f = open(fn)
for line in f:
if line.startswith('track'):
continue
if line.startswith('variableStep'):
a, b, c = line.strip().split()
prefix, chrom = b.split('=')
if add_chr:
chrom = 'chr' + chrom
if chrom in to_ignore:
continue
fout.write(' '.join([a, prefix + '=' + chrom, c]) + '\n')
span = int(c.split('=')[1])
continue
pos, val = line.strip().split()
if chrom in to_ignore:
continue
if (int(pos) + span) >= genome[chrom][1]:
continue
fout.write(line)
return output
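# Hedged usage sketch for fix_macs_wig: the file name "macs_peaks.wig", the
# "hg19" assembly and the chrM exclusion are illustrative assumptions, not
# values required by this module.
def _demo_fix_macs_wig():
    fixed = fix_macs_wig(
        'macs_peaks.wig',    # hypothetical WIG file produced by MACS
        genome='hg19',       # assembly whose chromsizes bound the intervals
        add_chr=True,        # MACS output often lacks the "chr" prefix
        to_ignore=['chrM'],  # e.g. skip the mitochondrial chromosome
    )
    return fixed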
def colored_bigbed(x, color, genome, target, autosql=None, bedtype=None):
"""
if color is "smart", then use metaseq's smart colormap centered on zero.
otherwise, use singlecolormap.
assumes that you have scores in BedTool x; this will zero all scores in the
final bigbed
"""
from pybedtools.featurefuncs import add_color
norm = x.colormap_normalize()
if color == 'smart':
cmap = smart_colormap(norm.vmin, norm.vmax)
else:
cmap = singlecolormap(color)
def func(f):
f = add_color(f, cmap, norm)
f.score = '0'
return f
x = x\
.sort()\
.each(func)\
.saveas()
bigbed(x, genome=genome, output=target, _as=autosql, bedtype=bedtype)
def singlecolormap(color, func=None, n=64):
"""
Creates a linear colormap where `color` is the top, and func(color) is the
bottom.
`func` should take an RGB tuple as its only input. If `func` is None, then
use a light gray as the min.
`n` is the number of levels.
"""
if func is None:
def func(x):
return '0.9'
import numpy as np
import matplotlib
rgb = np.array(matplotlib.colors.colorConverter.to_rgb(color))
return matplotlib.colors.LinearSegmentedColormap.from_list(
name='colormap',
colors=[func(rgb), rgb],
N=n,
)
def colortuple(col):
"""
Given a color in any format supported by matplotlib, return
a comma-separated string of R,G,B uint8 values.
"""
    import matplotlib.colors
    rgb = np.array(matplotlib.colors.colorConverter.to_rgb(col))
rgb = [int(i * 255) for i in rgb]
return ','.join(map(str, rgb))
def add_chr(f):
"""
Prepend "chr" to the beginning of chromosome names.
Useful when passed to pybedtool.BedTool.each().
"""
f.chrom = 'chr' + f.chrom
return f
def chromsizes(assembly):
url = ("http://hgdownload.cse.ucsc.edu/goldenPath/"
"{0}/bigZips/{0}.chrom.sizes")
dest = tempfile.NamedTemporaryFile(delete=False).name + '.chromsizes'
download(url.format(assembly), dest)
return dest
def bigbed(filename, genome, output, blockSize=256, itemsPerSlot=512,
bedtype=None, _as=None, unc=False, tab=False):
"""
Parameters
----------
:filename:
BED-like file to convert
:genome:
Assembly string (e.g., "mm10" or "hg19")
:output:
Path to bigBed file to create.
Other args are passed to bedToBigBed. In particular, `bedtype` (which
becomes the "-type=" argument) is automatically handled for you if it is
kept as the default None.
Assumes that a recent version of bedToBigBed from UCSC is on the path.
"""
if isinstance(filename, pybedtools.BedTool):
filename = filename.fn
x = pybedtools.BedTool(filename)
chromsizes_file = chromsizes(genome)
if bedtype is None:
bedtype = 'bed%s' % x.field_count()
cmds = [
'bedToBigBed',
filename,
chromsizes_file,
output,
'-blockSize=%s' % blockSize,
'-itemsPerSlot=%s' % itemsPerSlot,
'-type=%s' % bedtype
]
if unc:
cmds.append('-unc')
if tab:
cmds.append('-tab')
if _as:
cmds.append('-as=%s' % _as)
try:
p = subprocess.check_output(cmds, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
os.system('mv {0} {0}.bak'.format(filename))
raise
return output
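# Hedged example of converting a BED file to bigBed with the helper above.
# "peaks.bed", "peaks.bb" and the "dm3" assembly are placeholder values, and
# bedToBigBed must be on the PATH, as the docstring notes.
def _demo_bigbed():
    return bigbed('peaks.bed', genome='dm3', output='peaks.bb')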
def bigwig(filename, genome, output, blockSize=256, itemsPerSlot=512,
bedtype=None, _as=None, unc=False, tab=False):
"""
Parameters
----------
:filename:
BEDGRAPH-like file to convert
:genome:
Assembly string (e.g., "mm10" or "hg19")
:output:
Path to bigWig file to create.
Other args are passed to bedGraphToBigWig.
"""
chromsizes_file = chromsizes(genome)
cmds = [
'bedGraphToBigWig',
filename,
chromsizes_file,
output,
]
p = subprocess.check_output(cmds, stderr=subprocess.STDOUT)
return output
| bsd-3-clause |
alexeyum/scikit-learn | examples/decomposition/plot_incremental_pca.py | 175 | 1974 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
colors = ['navy', 'turquoise', 'darkorange']
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
color=color, lw=2, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
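# The comparison above calls fit_transform on the full array. As a hedged
# sketch of the truly incremental workflow the docstring alludes to, the loop
# below feeds IncrementalPCA one chunk at a time, so only a single chunk ever
# needs to be in memory; the chunk size of 25 is an arbitrary illustration.
ipca_stream = IncrementalPCA(n_components=n_components)
for chunk in np.array_split(X, len(X) // 25):
    ipca_stream.partial_fit(chunk)
X_ipca_stream = ipca_stream.transform(X)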
| bsd-3-clause |
icdishb/scikit-learn | sklearn/preprocessing/label.py | 13 | 28598 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute ``indicator_matrix_`` is deprecated and will be "
"removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute ``multilabel_`` is deprecated and will be removed "
"in 0.17. Use ``y_type_.startswith('multilabel')`` "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
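# Illustrative sketch (not part of the library API): for multiclass targets the
# inverse transform picks the per-row argmax, so continuous scores such as
# decision_function outputs can be handed back directly. The scores and class
# labels below are made up.
def _inverse_binarize_multiclass_example():
    scores = np.array([[0.1, 2.5, -0.3],
                       [1.9, 0.2, 0.4]])
    # argmax per row -> indices [1, 0] -> labels ['b', 'a']
    return _inverse_binarize_multiclass(scores, np.array(['a', 'b', 'c']))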
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
LiaoPan/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
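# A short, hedged follow-up: the module docstring mentions that Birch without a
# global clustering step reduces the 100,000 samples to roughly 158 subclusters.
# The exact count depends on the threshold, so treat the printed number as
# illustrative rather than fixed.
print("Subclusters found by Birch without global clustering: %d"
      % len(birch_models[0].subcluster_centers_))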
| bsd-3-clause |
IshankGulati/scikit-learn | sklearn/feature_extraction/text.py | 19 | 52042 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
normalized = unicodedata.normalize('NFKD', s)
if normalized == s:
return s
else:
return ''.join([c for c in normalized if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
    - it is very low memory and scalable to large datasets as there is no need
      to store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary : boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
if isinstance(X, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
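# ----------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): minimal
# use of the stateless HashingVectorizer defined above. The documents are
# made up for illustration only.
#
#   >>> hv = HashingVectorizer(n_features=2 ** 10)
#   >>> X = hv.transform(['the quick brown fox', 'jumped over the lazy dog'])
#   >>> X.shape
#   (2, 1024)
#
# Because no vocabulary_ mapping is stored, transform() can be applied to
# a stream of documents without fitting, and fit_transform is simply an
# alias for transform (see above).
# ----------------------------------------------------------------------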
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
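# Editor's note (illustrative, not part of the original module): for the
# count matrix X = [[1, 0, 2], [0, 0, 1]] stored as a CSR matrix,
# _document_frequency(X) returns array([1, 0, 2]) -- the number of
# documents (rows) in which each term (column) has a non-zero count.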
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.csr_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
vocabulary[term] = new_val
map_index[old_val] = new_val
X.indices = map_index.take(X.indices, mode='clip')
return X
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = []
indptr = _make_int_array()
values = _make_int_array()
indptr.append(0)
for doc in raw_documents:
feature_counter = {}
for feature in analyze(doc):
try:
feature_idx = vocabulary[feature]
if feature_idx not in feature_counter:
feature_counter[feature_idx] = 1
else:
feature_counter[feature_idx] += 1
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
j_indices.extend(feature_counter.keys())
values.extend(feature_counter.values())
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = np.asarray(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = frombuffer_empty(values, dtype=np.intc)
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sort_indices()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if isinstance(raw_documents, six.string_types):
raise ValueError(
"Iterable over raw text documents expected, "
"string object received.")
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
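# ----------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): learning
# a vocabulary and counting tokens with the CountVectorizer defined above.
# The tiny corpus is made up for illustration only.
#
#   >>> cv = CountVectorizer()
#   >>> X = cv.fit_transform(['apple banana apple', 'banana cherry'])
#   >>> X.toarray()            # columns: apple, banana, cherry
#   array([[2, 1, 0],
#          [0, 1, 1]])
#
# cv.vocabulary_ maps each term to its column index ('apple' -> 0,
# 'banana' -> 1, 'cherry' -> 2) because _sort_features orders the learned
# features alphabetically after fitting.
# ----------------------------------------------------------------------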
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The formula that is used to compute the tf-idf of term t is
tf-idf(d, t) = tf(t) * idf(d, t), and the idf is computed as
idf(d, t) = log [ n / df(d, t) ] + 1 (if ``smooth_idf=False``),
where n is the total number of documents and df(d, t) is the
document frequency; the document frequency is the number of documents d
that contain term t. The effect of adding "1" to the idf in the equation
above is that terms with zero idf, i.e., terms that occur in all documents
in a training set, will not be entirely ignored.
(Note that the idf formula above differs from the standard
textbook notation that defines the idf as
idf(d, t) = log [ n / (df(d, t) + 1) ]).
If ``smooth_idf=True`` (the default), the constant "1" is added to the
numerator and denominator of the idf as if an extra document was seen
containing every term in the collection exactly once, which prevents
zero divisions: idf(d, t) = log [ (1 + n) / (1 + df(d, t)) ] + 1.
Furthermore, the formulas used to compute tf and idf depend
on parameter settings that correspond to the SMART notation used in IR
as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when
``sublinear_tf=True``.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when ``norm='l2'``, "n" (none)
when ``norm=None``.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schütze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf, diags=0, m=n_features,
n=n_features, format='csr')
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
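# ----------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): a worked
# idf computation matching the smooth_idf=True formula documented above,
# idf(t) = log[(1 + n) / (1 + df(t))] + 1.
#
#   counts = [[3, 0, 1],
#             [2, 0, 0],
#             [3, 0, 2],
#             [4, 0, 0]]
#
# Here n = 4 documents; term 0 appears in 4 of them, term 1 in 0 and
# term 2 in 2, so
#
#   idf = [log(5/5) + 1, log(5/1) + 1, log(5/3) + 1]
#       ~ [1.0,          2.609,        1.511]
#
# TfidfTransformer(smooth_idf=True).fit(counts).idf_ reproduces these
# values; transform() then multiplies the (optionally sublinear) term
# counts by them and L2-normalises each row when norm='l2'.
# ----------------------------------------------------------------------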
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
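# ----------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): the
# TfidfVectorizer defined above is equivalent to CountVectorizer followed
# by TfidfTransformer. The corpus is made up for illustration only.
#
#   >>> vec = TfidfVectorizer(stop_words='english')
#   >>> X = vec.fit_transform(['the cat sat on the mat',
#   ...                        'the dog sat on the log'])
#   >>> sorted(vec.vocabulary_)
#   ['cat', 'dog', 'log', 'mat', 'sat']
#
# Each row of X is an L2-normalised tf-idf vector (norm='l2' by default);
# the term shared by both documents ('sat') receives a lower idf weight
# than the terms unique to a single document.
# ----------------------------------------------------------------------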
| bsd-3-clause |
go-bears/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/artist.py | 69 | 33042 | from __future__ import division
import re, warnings
import matplotlib
import matplotlib.cbook as cbook
from transforms import Bbox, IdentityTransform, TransformedBbox, TransformedPath
from path import Path
## Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
# http://groups.google.com/groups?hl=en&lr=&threadm=mailman.5090.1098044946.5135.python-list%40python.org&rnum=1&prev=/groups%3Fq%3D__doc__%2Bauthor%253Ajdhunter%2540ace.bsd.uchicago.edu%26hl%3Den%26btnG%3DGoogle%2BSearch
class Artist(object):
"""
Abstract base class for someone who renders into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
def __init__(self):
self.figure = None
self._transform = None
self._transformSet = False
self._visible = True
self._animated = False
self._alpha = 1.0
self.clipbox = None
self._clippath = None
self._clipon = True
self._lod = False
self._label = ''
self._picker = None
self._contains = None
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self.axes = None
self._remove_method = None
self._url = None
self.x_isdata = True # False to avoid updating Axes.dataLim with x
self.y_isdata = True # with y
self._snap = None
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should set
# the _remove_method attribute directly. This would be a protected
# attribute if Python supported that sort of thing. The callback
# has one parameter, which is the child to be removed.
if self._remove_method != None:
self._remove_method(self)
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property
# of whether or not the artist should affect the limits. Then there
# will be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
#print 'artist.convert_xunits no conversion: ax=%s'%ax
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None: return y
return ax.yaxis.convert_units(y)
def set_axes(self, axes):
"""
Set the :class:`~matplotlib.axes.Axes` instance in which the
artist resides, if any.
ACCEPTS: an :class:`~matplotlib.axes.Axes` instance
"""
self.axes = axes
def get_axes(self):
"""
Return the :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*
"""
return self.axes
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
"""
try: del self._propobservers[oid]
except KeyError: pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in self._propobservers.items():
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
import traceback
L = []
try:
hascursor,info = self.contains(event)
if hascursor:
L.append(self)
except:
traceback.print_exc()
print "while checking",self.__class__
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if callable(self._contains): return self._contains(self,mouseevent)
#raise NotImplementedError,str(self.__class__)+" needs 'contains' method"
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False,{}
def set_contains(self,picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if callable(picker):
inside,prop = picker(self,mouseevent)
else:
inside,prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
          off an event if its data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g. the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
to determine the hit test. if the mouse event is over the
artist, return *hit=True* and props is a dictionary of
properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
"""
self._url = url
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
return self._snap
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg backends.
"""
self._snap = snap
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
self.figure = fig
self.pchanged()
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
from patches import Patch, Rectangle
success = False
if transform is None:
if isinstance(path, Rectangle):
self.clipbox = TransformedBbox(Bbox.unit(), path.get_transform())
self._clippath = None
success = True
elif isinstance(path, Patch):
self._clippath = TransformedPath(
path.get_path(),
path.get_transform())
success = True
if path is None:
self._clippath = None
success = True
elif isinstance(path, Path):
self._clippath = TransformedPath(path, transform)
success = True
if not success:
print type(path), type(transform)
raise TypeError("Invalid arguments to set_clip_path")
self.pchanged()
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
ACCEPTS: [True | False]
"""
self._clipon = b
self.pchanged()
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible(): return
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
def set_lod(self, on):
"""
Set Level of Detail on or off. If on, the artists may examine
things like the pixel width of the axes and draw a subset of
their contents accordingly
ACCEPTS: [True | False]
"""
self._lod = on
self.pchanged()
def set_visible(self, b):
"""
        Set the artist's visibility.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
self._animated = b
self.pchanged()
def update(self, props):
"""
Update the properties of this :class:`Artist` from the
dictionary *prop*.
"""
store = self.eventson
self.eventson = False
changed = False
for k,v in props.items():
func = getattr(self, 'set_'+k, None)
if func is None or not callable(func):
raise AttributeError('Unknown property %s'%k)
func(v)
changed = True
self.eventson = store
if changed: self.pchanged()
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: any string
"""
self._label = s
self.pchanged()
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._lod = other._lod
self._label = other._label
self.pchanged()
def set(self, **kwargs):
"""
A tkstyle set command, pass *kwargs* to set properties
"""
ret = []
for k,v in kwargs.items():
k = k.lower()
funcName = "set_%s"%k
func = getattr(self,funcName)
ret.extend( [func(v)] )
return ret
def findobj(self, match=None):
"""
pyplot signature:
findobj(o=gcf(), match=None)
Recursively find all :class:matplotlib.artist.Artist instances
contained in self.
*match* can be
- None: return all objects contained in artist (including artist)
- function with signature ``boolean = match(artist)`` used to filter matches
- class instance: eg Line2D. Only return artists of class type
.. plot:: mpl_examples/pylab_examples/findobj_demo.py
"""
if match is None: # always return True
def matchfunc(x): return True
elif cbook.issubclass_safe(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
matchfunc = match
else:
            raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
class ArtistInspector:
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
    and return information about its settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or sequence of
:class:`Artists`. If a sequence is used, we assume it is a
homogeneous sequence (all :class:`Artists` are of the same
type) and it is your responsibility to make sure this is so.
"""
if cbook.iterable(o) and len(o): o = o[0]
self.oorig = o
if not isinstance(o, type):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
Eg., for lines::
{'markerfacecolor': 'mfc',
'linewidth' : 'lw',
}
"""
names = [name for name in dir(self.o) if
(name.startswith('set_') or name.startswith('get_'))
and callable(getattr(self.o,name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func): continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))")
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
Eg., for a line linestyle, return
[ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
"""
name = 'set_%s'%attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s'%(self.o,name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None: return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return match.group(1).replace('\n', ' ')
return 'unknown'
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'): continue
o = getattr(self.o, name)
if not callable(o): continue
func = o
if self.is_alias(func): continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. Eg., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None: return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
E.g. for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x for x in self.aliasd[s].keys()])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
        If *prop* is *None*, return a list of strings of all settable properties
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' '*leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' %(pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target) for prop, target in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max([len(n) for n in names])
col1_len = max([len(a) for a in accepts])
table_formatstr = pad + '='*col0_len + ' ' + '='*col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len+3) + \
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len+3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
########
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name_rest(prop, path)
lines.append('%s%s: %s' %(pad, name, accepts))
return lines
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_')
and callable(getattr(o, name))]
#print getters
getters.sort()
lines = []
for name in getters:
func = getattr(o, name)
if self.is_alias(func): continue
try: val = func()
except: continue
if getattr(val, 'shape', ()) != () and len(val)>6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s)>50:
s = s[:50] + '...'
name = self.aliased_name(name[4:])
lines.append(' %s = %s' %(name, s))
return lines
def findobj(self, match=None):
"""
Recursively find all :class:`matplotlib.artist.Artist`
instances contained in *self*.
If *match* is not None, it can be
- function with signature ``boolean = match(artist)``
- class instance: eg :class:`~matplotlib.lines.Line2D`
used to filter matches.
"""
if match is None: # always return True
def matchfunc(x): return True
elif issubclass(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
            matchfunc = match
else:
            raise ValueError('match must be None, a matplotlib.artist.Artist subclass, or a callable')
artists = []
for c in self.get_children():
if matchfunc(c):
artists.append(c)
artists.extend([thisc for thisc in c.findobj(matchfunc) if matchfunc(thisc)])
if matchfunc(self):
artists.append(self)
return artists
def getp(o, property=None):
"""
Return the value of handle property. property is an optional string
for the property you want to return
Example usage::
getp(o) # get all the object properties
getp(o, 'linestyle') # get the linestyle property
*o* is a :class:`Artist` instance, eg
:class:`~matplotllib.lines.Line2D` or an instance of a
:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
If the *property* is 'somename', this function returns
o.get_somename()
:func:`getp` can be used to query all the gettable properties with
``getp(o)``. Many properties have aliases for shorter typing, e.g.
'lw' is an alias for 'linewidth'. In the output, aliases and full
property names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
"""
insp = ArtistInspector(o)
if property is None:
ret = insp.pprint_getters()
print '\n'.join(ret)
return
func = getattr(o, 'get_' + property)
return func()
# alias
get = getp
def setp(h, *args, **kwargs):
"""
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide the
name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
:func:`setp` operates on a single instance or a list of instances.
If you are in query mode introspecting the possible values, only
the first instance in the sequence is used. When actually setting
values, all the instances will be set. E.g., suppose you have a
list of two lines, the following will make both lines thicker and
red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the matlab(TM) style string/value pairs or
with python kwargs. For example, the following are equivalent::
        >>> setp(lines, 'linewidth', 2, 'color', 'r')  # matlab style
>>> setp(lines, linewidth=2, color='r') # python style
"""
insp = ArtistInspector(h)
if len(kwargs)==0 and len(args)==0:
print '\n'.join(insp.pprint_setters())
return
if len(kwargs)==0 and len(args)==1:
print insp.pprint_setters(prop=args[0])
return
if not cbook.iterable(h): h = [h]
else: h = cbook.flatten(h)
if len(args)%2:
raise ValueError('The set args must be string, value pairs')
funcvals = []
for i in range(0, len(args)-1, 2):
funcvals.append((args[i], args[i+1]))
funcvals.extend(kwargs.items())
ret = []
for o in h:
for s, val in funcvals:
s = s.lower()
funcName = "set_%s"%s
func = getattr(o,funcName)
ret.extend( [func(val)] )
return [x for x in cbook.flatten(ret)]
def kwdoc(a):
hardcopy = matplotlib.rcParams['docstring.hardcopy']
if hardcopy:
return '\n'.join(ArtistInspector(a).pprint_setters_rest(leadingspace=2))
else:
return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
kwdocd = dict()
kwdocd['Artist'] = kwdoc(Artist)
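# ----------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): typical
# use of the introspection helpers defined above from an interactive
# session (the plotted data is made up for illustration).
#
#   >>> import matplotlib.pyplot as plt
#   >>> line, = plt.plot([1, 2, 3])
#   >>> setp(line, linewidth=2, alpha=0.5)   # set several properties
#   >>> getp(line, 'linewidth')              # query a single property -> 2
#   >>> setp(line, 'linestyle')              # print the accepted values
#
# ArtistInspector(line).get_setters() lists every settable property of the
# artist, and an axes' or figure's findobj(match=SomeArtistClass) collects
# all matching child artists.
# ----------------------------------------------------------------------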
| agpl-3.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/userdemo/colormap_normalizations_lognorm.py | 1 | 1868 | """
===============================
Colormap Normalizations Lognorm
===============================
Demonstration of using norm to map colormaps onto data in non-linear ways.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.mlab import bivariate_normal
'''
Lognorm: Instead of plotting pcolor(log10(Z1)), you can have colorbars
with exponential (log-scaled) labels by using a norm.
'''
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
N = 100
X, Y = np.mgrid[-3:3:complex(0, N), -2:2:complex(0, N)]
# A low hump with a spike coming out of the top right. Needs to have
# z/colour axis on a log scale so we see both hump and spike; a linear
# scale only shows the spike.
Z1 = bivariate_normal(X, Y, 0.1, 0.2, 1.0, 1.0) + \
0.1 * bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
fig, ax = plt.subplots(2, 1)
pcm = ax[0].pcolor(X, Y, Z1,
norm=colors.LogNorm(vmin=Z1.min(), vmax=Z1.max()),
cmap='PuBu_r')
fig.colorbar(pcm, ax=ax[0], extend='max')
pcm = ax[1].pcolor(X, Y, Z1, cmap='PuBu_r')
fig.colorbar(pcm, ax=ax[1], extend='max')
pltshow(plt)
| mit |
devanshdalal/scikit-learn | examples/hetero_feature_union.py | 81 | 6241 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to scikit-learn feature
    matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
backtou/longlab | gr-filter/examples/fir_filter_ccc.py | 13 | 3154 | #!/usr/bin/env python
from gnuradio import gr, filter
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys  # needed for sys.exit() in the import guards below
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = gr.vector_sink_c()
self.vsnk_out = gr.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
                      help="Decimation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
williamFalcon/pytorch-lightning | pytorch_lightning/loggers/neptune.py | 1 | 12216 | """
Log using `neptune-logger <https://www.neptune.ml>`_
.. _neptune:
NeptuneLogger
--------------
"""
import argparse
from logging import getLogger
from typing import Optional, List, Dict, Any, Union, Iterable
try:
import neptune
from neptune.experiments import Experiment
except ImportError:
raise ImportError('You want to use `neptune` logger which is not installed yet,'
' install it with `pip install neptune-client`.')
import torch
from torch import is_tensor
from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only
logger = getLogger(__name__)
class NeptuneLogger(LightningLoggerBase):
r"""
Neptune logger can be used in the online mode or offline (silent) mode.
    To log experiment data in online mode, NeptuneLogger requires an API key:
"""
def __init__(self, api_key: Optional[str] = None, project_name: Optional[str] = None,
offline_mode: bool = False, experiment_name: Optional[str] = None,
upload_source_files: Optional[List[str]] = None, params: Optional[Dict[str, Any]] = None,
properties: Optional[Dict[str, Any]] = None, tags: Optional[List[str]] = None, **kwargs):
r"""
Initialize a neptune.ml logger.
.. note:: Requires either an API Key (online mode) or a local directory path (offline mode)
.. code-block:: python
# ONLINE MODE
from pytorch_lightning.loggers import NeptuneLogger
# arguments made to NeptuneLogger are passed on to the neptune.experiments.Experiment class
neptune_logger = NeptuneLogger(
api_key=os.environ["NEPTUNE_API_TOKEN"],
project_name="USER_NAME/PROJECT_NAME",
experiment_name="default", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-lightning","mlp"] # Optional,
)
trainer = Trainer(max_epochs=10, logger=neptune_logger)
.. code-block:: python
# OFFLINE MODE
from pytorch_lightning.loggers import NeptuneLogger
# arguments made to NeptuneLogger are passed on to the neptune.experiments.Experiment class
neptune_logger = NeptuneLogger(
project_name="USER_NAME/PROJECT_NAME",
experiment_name="default", # Optional,
params={"max_epochs": 10}, # Optional,
tags=["pytorch-lightning","mlp"] # Optional,
)
trainer = Trainer(max_epochs=10, logger=neptune_logger)
Use the logger anywhere in you LightningModule as follows:
.. code-block:: python
def train_step(...):
# example
self.logger.experiment.log_metric("acc_train", acc_train) # log metrics
self.logger.experiment.log_image("worse_predictions", prediction_image) # log images
self.logger.experiment.log_artifact("model_checkpoint.pt", prediction_image) # log model checkpoint
self.logger.experiment.whatever_neptune_supports(...)
def any_lightning_module_function_or_hook(...):
self.logger.experiment.log_metric("acc_train", acc_train) # log metrics
self.logger.experiment.log_image("worse_predictions", prediction_image) # log images
self.logger.experiment.log_artifact("model_checkpoint.pt", prediction_image) # log model checkpoint
self.logger.experiment.whatever_neptune_supports(...)
Args:
            api_key (str | None): Required in online mode. Neptune API token, found on https://neptune.ml.
Read how to get your API key
https://docs.neptune.ml/python-api/tutorials/get-started.html#copy-api-token.
project_name (str): Required in online mode. Qualified name of a project in a form of
"namespace/project_name" for example "tom/minst-classification".
If None, the value of NEPTUNE_PROJECT environment variable will be taken.
You need to create the project in https://neptune.ml first.
offline_mode (bool): Optional default False. If offline_mode=True no logs will be send to neptune.
Usually used for debug purposes.
experiment_name (str|None): Optional. Editable name of the experiment.
Name is displayed in the experiment’s Details (Metadata section) and in experiments view as a column.
upload_source_files (list|None): Optional. List of source files to be uploaded.
Must be list of str or single str. Uploaded sources are displayed in the experiment’s Source code tab.
If None is passed, Python file from which experiment was created will be uploaded.
Pass empty list ([]) to upload no files. Unix style pathname pattern expansion is supported.
For example, you can pass '\*.py'
to upload all python source files from the current directory.
For recursion lookup use '\**/\*.py' (for Python 3.5 and later).
For more information see glob library.
params (dict|None): Optional. Parameters of the experiment. After experiment creation params are read-only.
Parameters are displayed in the experiment’s Parameters section and each key-value pair can be
viewed in experiments view as a column.
properties (dict|None): Optional default is {}. Properties of the experiment.
They are editable after experiment is created. Properties are displayed in the experiment’s Details and
each key-value pair can be viewed in experiments view as a column.
tags (list|None): Optional default []. Must be list of str. Tags of the experiment.
They are editable after experiment is created (see: append_tag() and remove_tag()).
Tags are displayed in the experiment’s Details and can be viewed in experiments view as a column.
"""
super().__init__()
self.api_key = api_key
self.project_name = project_name
self.offline_mode = offline_mode
self.experiment_name = experiment_name
self.upload_source_files = upload_source_files
self.params = params
self.properties = properties
self.tags = tags
self._experiment = None
self._kwargs = kwargs
if offline_mode:
self.mode = 'offline'
neptune.init(project_qualified_name='dry-run/project',
backend=neptune.OfflineBackend())
else:
self.mode = 'online'
neptune.init(api_token=self.api_key,
project_qualified_name=self.project_name)
logger.info(f'NeptuneLogger was initialized in {self.mode} mode')
@property
def experiment(self) -> Experiment:
r"""
Actual neptune object. To use neptune features do the following.
Example::
self.logger.experiment.some_neptune_function()
"""
if self._experiment is not None:
return self._experiment
else:
self._experiment = neptune.create_experiment(name=self.experiment_name,
params=self.params,
properties=self.properties,
tags=self.tags,
upload_source_files=self.upload_source_files,
**self._kwargs)
return self._experiment
@rank_zero_only
def log_hyperparams(self, params: argparse.Namespace):
for key, val in vars(params).items():
self.experiment.set_property(f'param__{key}', val)
@rank_zero_only
def log_metrics(
self,
metrics: Dict[str, Union[torch.Tensor, float]],
step: Optional[int] = None
):
"""Log metrics (numeric values) in Neptune experiments
Args:
metrics: Dictionary with metric names as keys and measured quantities as values
step: Step number at which the metrics should be recorded, must be strictly increasing
"""
for key, val in metrics.items():
self.log_metric(key, val, step=step)
@rank_zero_only
def finalize(self, status: str):
self.experiment.stop()
@property
def name(self) -> str:
if self.mode == 'offline':
return 'offline-name'
else:
return self.experiment.name
@property
def version(self) -> str:
if self.mode == 'offline':
return 'offline-id-1234'
else:
return self.experiment.id
@rank_zero_only
def log_metric(
self,
metric_name: str,
metric_value: Union[torch.Tensor, float, str],
step: Optional[int] = None
):
"""Log metrics (numeric values) in Neptune experiments
Args:
metric_name: The name of log, i.e. mse, loss, accuracy.
metric_value: The value of the log (data-point).
step: Step number at which the metrics should be recorded, must be strictly increasing
"""
if is_tensor(metric_value):
metric_value = metric_value.cpu().detach()
if step is None:
self.experiment.log_metric(metric_name, metric_value)
else:
self.experiment.log_metric(metric_name, x=step, y=metric_value)
@rank_zero_only
def log_text(self, log_name: str, text: str, step: Optional[int] = None):
"""Log text data in Neptune experiment
Args:
log_name: The name of log, i.e. mse, my_text_data, timing_info.
text: The value of the log (data-point).
step: Step number at which the metrics should be recorded, must be strictly increasing
"""
self.log_metric(log_name, text, step=step)
@rank_zero_only
def log_image(self, log_name: str, image: Union[str, Any], step: Optional[int] = None):
"""Log image data in Neptune experiment
Args:
log_name: The name of log, i.e. bboxes, visualisations, sample_images.
image (str|PIL.Image|matplotlib.figure.Figure): The value of the log (data-point).
Can be one of the following types: PIL image, matplotlib.figure.Figure, path to image file (str)
step: Step number at which the metrics should be recorded, must be strictly increasing
"""
if step is None:
self.experiment.log_image(log_name, image)
else:
self.experiment.log_image(log_name, x=step, y=image)
@rank_zero_only
def log_artifact(self, artifact: str, destination: Optional[str] = None):
"""Save an artifact (file) in Neptune experiment storage.
Args:
artifact: A path to the file in local filesystem.
destination: Optional default None. A destination path.
If None is passed, an artifact file name will be used.
"""
self.experiment.log_artifact(artifact, destination)
@rank_zero_only
def set_property(self, key: str, value: Any):
"""Set key-value pair as Neptune experiment property.
Args:
key: Property key.
value: New value of a property.
"""
self.experiment.set_property(key, value)
@rank_zero_only
def append_tags(self, tags: Union[str, Iterable[str]]):
"""appends tags to neptune experiment
Args:
            tags: Tags to add to the current experiment. If str is passed, a single tag is added.
If multiple - comma separated - str are passed, all of them are added as tags.
If list of str is passed, all elements of the list are added as tags.
"""
if str(tags) == tags:
            tags = [tags]  # make it an iterable if it is not yet
self.experiment.append_tags(*tags)
| apache-2.0 |
caryan/PyBio | TnSeq/HopCountPlotter.py | 1 | 4972 | # Copyright 2013 Colm Ryan [email protected]
# License GPL v3 (http://www.gnu.org/licenses/gpl.txt)
from __future__ import division
#Let's write to SVG style graphics
#import matplotlib
#matplotlib.use('svg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.collections as mcollections
import numpy as np
from xlrd import open_workbook
#Worksheets with data
hopCountsFile_SampA = '/home/cryan/Desktop/Colm NCTC8325/Vancomycin Ring A Galaxy1174-(Hopcount_on_data_99_and_data_1172__Hop_Table).tabular.xlsx'
hopCountsFile_SampB = '/home/cryan/Desktop/Colm NCTC8325/vancomycin ring B Galaxy1176-(Hopcount_on_data_99_and_data_1173__Hop_Table).tabular.xlsx'
hopCountsAggr = '/home/cryan/Desktop/Colm NCTC8325/Vancomycin ring AGalaxy1178-(Aggregate_Hop_Table_on_data_99_and_data_1174__Result_Table).tabular.xlsx'
#Region of interest
ROI = [0, 200000]
#Minimum number of counts to plot
minPlotCts = 10
sheetA = open_workbook(hopCountsFile_SampA).sheet_by_index(0);
sheetB = open_workbook(hopCountsFile_SampB).sheet_by_index(0);
sheetAggr = open_workbook(hopCountsAggr).sheet_by_index(0);
#Find the full set of insertion sites
colHeaders = [tmpCell.value for tmpCell in sheetA.row(0)]
posCol = colHeaders.index(u'Position')
plusCtsCol = colHeaders.index(u'PlusCount')
minusCtsCol = colHeaders.index(u'MinusCount')
insertionSitesA = [int(tmpValue) for tmpValue in sheetA.col_values(posCol,1)]
plusCtsA = {tmpSite:int(tmpValue) for tmpSite,tmpValue in zip(insertionSitesA,sheetA.col_values(plusCtsCol,1))}
minusCtsA = {tmpSite:int(tmpValue) for tmpSite,tmpValue in zip(insertionSitesA,sheetA.col_values(minusCtsCol,1))}
insertionSitesB = [int(tmpValue) for tmpValue in sheetB.col_values(posCol,1)]
plusCtsB = {tmpSite:int(tmpValue) for tmpSite,tmpValue in zip(insertionSitesB,sheetB.col_values(plusCtsCol,1))}
minusCtsB = {tmpSite:int(tmpValue) for tmpSite,tmpValue in zip(insertionSitesB,sheetB.col_values(minusCtsCol,1))}
allInsertionSites = set(insertionSitesA).union(set(insertionSitesB))
#Find the mean value of the insertions
meanPlusCts = {}
meanMinusCts = {}
for tmpSite in allInsertionSites:
tmpPlusA = plusCtsA[tmpSite] if tmpSite in plusCtsA else 0
tmpPlusB = plusCtsB[tmpSite] if tmpSite in minusCtsB else 0
meanPlusCts[tmpSite] = 0.5*(tmpPlusA + tmpPlusB)
tmpMinusA = minusCtsA[tmpSite] if tmpSite in plusCtsA else 0
tmpMinusB = minusCtsB[tmpSite] if tmpSite in minusCtsB else 0
meanMinusCts[tmpSite] = 0.5*(tmpMinusA + tmpMinusB)
#Load the gene locations from the aggregate sheet
colHeaders = [tmpCell.value for tmpCell in sheetAggr.row(0)]
locusCol = colHeaders.index(u'Locus')
startCol = colHeaders.index(u'Start')
endCol = colHeaders.index(u'End')
strandCol = colHeaders.index(u'Strand')
geneNames = sheetAggr.col_values(locusCol, 2)
geneInfo = {tmpGene: {'start':int(tmpStart), 'stop':int(tmpEnd), 'strand':tmpStrand} for \
tmpGene, tmpStart, tmpEnd, tmpStrand in zip(geneNames, \
sheetAggr.col_values(startCol,2), sheetAggr.col_values(endCol,2), \
sheetAggr.col_values(strandCol,2)) }
#Now go through and add arrows for each gene
figH = plt.figure()
axesH = figH.add_subplot(111)
totLength = np.max(sheetAggr.col_values(endCol,2))
arrowPatches = []
for tmpGeneName, tmpGeneInfo in geneInfo.items():
if tmpGeneInfo['stop'] > ROI[0] and tmpGeneInfo['start'] < ROI[1] :
#Convert into figure coordinate
startFigCoord = tmpGeneInfo['start']
stopFigCoord = tmpGeneInfo['stop']
axesH.text(0.5*(stopFigCoord+startFigCoord), 0, tmpGeneName, horizontalalignment='center', verticalalignment='center')
if tmpGeneInfo['strand'] == '+':
arrowPatches.append(mpatches.FancyArrow(startFigCoord, 0, stopFigCoord-startFigCoord, 0,
width = 0.1, length_includes_head=True, head_length=0.1*(stopFigCoord-startFigCoord), edgecolor='none'))
else:
arrowPatches.append(mpatches.FancyArrow(stopFigCoord, 0, startFigCoord-stopFigCoord, 0,
width = 0.1, length_includes_head=True, head_length=0.1*(stopFigCoord-startFigCoord), edgecolor='none'))
arrowCollection = mcollections.PatchCollection(arrowPatches)
axesH.add_collection(arrowCollection)
plt.xlim((ROI[0],ROI[1]))
#Add the bars for insertions
lineCollectionPlus = mcollections.LineCollection([((tmpInsertion, 0),(tmpInsertion, np.log10(meanPlusCts[tmpInsertion])))
for tmpInsertion in allInsertionSites if (meanPlusCts[tmpInsertion] > minPlotCts and tmpInsertion>ROI[0] and tmpInsertion<ROI[1]) ],
linewidths=2.0, color='g')
lineCollectionMinus = mcollections.LineCollection([((tmpInsertion, 0),(tmpInsertion, -np.log10(meanMinusCts[tmpInsertion])))
for tmpInsertion in allInsertionSites if (meanMinusCts[tmpInsertion] > minPlotCts and tmpInsertion>ROI[0] and tmpInsertion<ROI[1]) ],
linewidths=2.0, color='r')
axesH.add_collection(lineCollectionPlus)
axesH.add_collection(lineCollectionMinus)
plt.ylim((-6,6))
plt.show()
| gpl-3.0 |
JavierGarciaD/AlgoRepo | EChanBook2/functions.py | 1 | 6659 |
from datetime import *
import datetime
from numpy import *
import statsmodels.api as sm
import statsmodels.tsa.stattools as ts
import scipy.io as sio
import pandas as pd
def normcdf(X):
(a1,a2,a3,a4,a5) = (0.31938153, -0.356563782, 1.781477937, -1.821255978, 1.330274429)
L = abs(X)
K = 1.0 / (1.0 + 0.2316419 * L)
w = 1.0 - 1.0 / sqrt(2*pi)*exp(-L*L/2.) * (a1*K + a2*K*K + a3*pow(K,3) + a4*pow(K,4) + a5*pow(K,5))
if X < 0:
w = 1.0-w
return w
def vratio(a, lag = 2, cor = 'hom'):
""" the implementation found in the blog Leinenbock
http://www.leinenbock.com/variance-ratio-test/
"""
#t = (std((a[lag:]) - (a[1:-lag+1])))**2;
#b = (std((a[2:]) - (a[1:-1]) ))**2;
n = len(a)
mu = sum(a[1:n]-a[:n-1])/n;
m=(n-lag+1)*(1-lag/n);
#print( mu, m, lag)
b=sum(square(a[1:n]-a[:n-1]-mu))/(n-1)
t=sum(square(a[lag:n]-a[:n-lag]-lag*mu))/m
vratio = t/(lag*b);
la = float(lag)
if cor == 'hom':
varvrt=2*(2*la-1)*(la-1)/(3*la*n)
elif cor == 'het':
varvrt=0;
sum2=sum(square(a[1:n]-a[:n-1]-mu));
for j in range(lag-1):
sum1a=square(a[j+1:n]-a[j:n-1]-mu);
sum1b=square(a[1:n-j]-a[0:n-j-1]-mu)
sum1=dot(sum1a,sum1b);
delta=sum1/(sum2**2);
varvrt=varvrt+((2*(la-j)/la)**2)*delta
zscore = (vratio - 1) / sqrt(float(varvrt))
pval = normcdf(zscore);
return vratio, zscore, pval
def hurst2(ts):
""" the implementation found in the blog Leinenbock
http://www.leinenbock.com/calculation-of-the-hurst-exponent-to-test-for-trend-and-mean-reversion/
"""
tau = []; lagvec = []
# Step through the different lags
for lag in range(2,100):
# produce price difference with lag
pp = subtract(ts[lag:],ts[:-lag])
# Write the different lags into a vector
lagvec.append(lag)
# Calculate the variance of the differnce vector
tau.append(sqrt(std(pp)))
# linear fit to double-log graph (gives power)
m = polyfit(log10(lagvec),log10(tau),1)
# calculate hurst
hurst = m[0]*2.0
# plot lag vs variance
#plt.plot(lagvec,tau,'o')
#plt.show()
return hurst
def hurst(ts):
    """ the implementation from the blog http://www.quantstart.com
http://www.quantstart.com/articles/Basics-of-Statistical-Mean-Reversion-Testing
Returns the Hurst Exponent of the time series vector ts"""
# Create the range of lag values
lags = range(2, 100)
# Calculate the array of the variances of the lagged differences
tau = [sqrt(std(subtract(ts[lag:], ts[:-lag]))) for lag in lags]
# Use a linear fit to estimate the Hurst Exponent
poly = polyfit(log(lags), log(tau), 1)
# Return the Hurst exponent from the polyfit output
return poly[0]*2.0
def half_life(ts):
    """ this function calculates the half-life of mean reversion
"""
# calculate the delta for each observation.
# delta = p(t) - p(t-1)
delta_ts = diff(ts)
# calculate the vector of lagged prices. lag = 1
# stack up a vector of ones and transpose
lag_ts = vstack([ts[1:], ones(len(ts[1:]))]).T
# calculate the slope (beta) of the deltas vs the lagged values
beta = linalg.lstsq(lag_ts, delta_ts)
# compute half life
half_life = log(2) / beta[0]
return half_life[0]
def random_walk(seed=1000, mu = 0.0, sigma = 1, length=1000):
    """ this function creates a series whose increments are independent and
    identically distributed, i.e. a random walk: the best prediction of the next
    value is the present value plus a random shock with finite mean and variance.
We distinguish two types of random walks: (1) random walk without drift (i.e., no constant
or intercept term) and (2) random walk with drift (i.e., a constant term is present).
The random walk model is an example of what is known in the literature as a unit root process.
RWM without drift: Yt = Yt−1 + ut
RWM with drift: Yt = δ + Yt−1 + ut
"""
ts = []
for i in range(length):
if i == 0:
ts.append(seed)
else:
            ts.append(mu + ts[i-1] + random.normal(0, sigma))  # numpy.random (star-imported above) provides normal(), not gauss()
return ts
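# --- Illustrative usage sketch (added; not part of the original module) ---
# The helpers above are meant to characterise trending vs. mean-reverting
# behaviour. On a pure random walk the Hurst exponent should come out near 0.5
# and the variance ratio near 1; the length and parameters below are arbitrary
# assumptions made only for this demo, which is not called automatically.
def _demo_stationarity_checks():
    walk = 100 + cumsum(random.normal(0.0, 1.0, 2000))  # same idea as random_walk()
    print('Hurst exponent :', hurst(walk))
    print('Half life      :', half_life(walk))
    print('Variance ratio :', vratio(walk, lag=2, cor='hom')[0])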
def subset_dataframe(data, start_date, end_date):
start = data.index.searchsorted(start_date)
end = data.index.searchsorted(end_date)
return data.ix[start:end]
def cointegration_test(y, x):
ols_result = sm.OLS(y, x).fit()
return ts.adfuller(ols_result.resid, maxlag=1)
def get_data_from_matlab(file_url, index, columns, data):
"""Description:*
This function takes a Matlab file .mat and extract some
information to a pandas data frame. The structure of the mat
file must be known, as the loadmat function used returns a
dictionary of arrays and they must be called by the key name
Args:
        file_url: the location of the .mat file
        index: the key for the array of date-like strings to be used as index
for the dataframe
columns: the key for the array of data to be used as columns in
the dataframe
data: the key for the array to be used as data in the dataframe
Returns:
Pandas dataframe
"""
import scipy.io as sio
import datetime as dt
# load mat file to dictionary
mat = sio.loadmat(file_url)
# define data to import, columns names and index
cl = mat[data]
stocks = mat[columns]
dates = mat[index]
# extract the ticket to be used as columns name in dataframe
# to-do: list compression here
columns = []
for each_item in stocks:
for inside_item in each_item:
for ticket in inside_item:
columns.append(ticket)
    # extract strings in the date array and convert to a DatetimeIndex
    # to-do: list comprehension here
df_dates =[]
for each_item in dates:
for inside_item in each_item:
df_dates.append(inside_item)
df_dates = pd.Series([pd.to_datetime(date, format= '%Y%m%d') for date in df_dates], name='date')
# construct the final dataframe
data = pd.DataFrame(cl, columns=columns, index=df_dates)
return data
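# --- Illustrative usage sketch (added; file name and keys are hypothetical) ---
# The .mat layout is dataset-specific: 'example_prices.mat' and the 'dates',
# 'tickers' and 'cl' keys below are placeholders, not keys guaranteed to exist
# in any particular file.
def _demo_get_data_from_matlab():
    prices = get_data_from_matlab('example_prices.mat', index='dates',
                                  columns='tickers', data='cl')
    return prices.head()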
def my_path(loc):
if loc == 'PC':
root_path = 'C:/Users/javgar119/Documents/Python/Data/'
elif loc == 'MAC':
root_path = '/Users/Javi/Documents/MarketData/'
return root_path | apache-2.0 |
mwv/scikit-learn | sklearn/neighbors/unsupervised.py | 22 | 4751 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`k_neighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| bsd-3-clause |
uclmr/inferbeddings | inferbeddings/evaluation/extra/base.py | 1 | 8375 | # -*- coding: utf-8 -*-
import abc
import numpy as np
from sklearn import metrics
from sklearn.preprocessing import normalize
import inferbeddings.evaluation.extra.davis as davis
import math
import logging
class RankingEvaluationMetric(metaclass=abc.ABCMeta):
"""
Abstract class inherited by all Evaluation Metrics.
"""
def __init__(self, pos_label=1, normalize_scores=True):
self.pos_label = pos_label
self.normalize_scores = normalize_scores
def _preprocess_scores(self, scores):
"""
Normalizes a vector of scores.
:param scores: Vector of scores.
:return: Normalized scores.
"""
preprocessed_scores = scores
if self.normalize_scores is True:
preprocessed_scores = normalize(preprocessed_scores.reshape(-1, 1), axis=0).ravel()
return preprocessed_scores
@abc.abstractmethod
def __call__(self, y, scores):
while False:
yield None
@property
@abc.abstractmethod
def name(self):
while False:
yield None
class AUCPRDavis(RankingEvaluationMetric):
"""
Area Under the Precision-Recall Curve (AUC-PR), calculated using the procedure described in [1].
[1] J Davis et al. - The Relationship Between Precision-Recall and ROC Curves - ICML 2006
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, y, scores):
scores = self._preprocess_scores(scores)
n, n_pos = len(scores), np.sum(y == self.pos_label)
metric = davis.AUC(n_pos, n - n_pos)
order = np.argsort(scores)[::-1]
ordered_y = y[order]
_tp = np.sum(y == self.pos_label)
pn_points = []
for i in reversed(range(y.shape[0])):
_y = ordered_y[:i + 1]
n = _y.shape[0]
_tp -= 1 if (i + 1) < ordered_y.shape[0] and ordered_y[i + 1] == self.pos_label else 0
fp = n - _tp
point = davis.PNPoint(_tp, fp)
pn_points += [point]
metric.set_pn_points(pn_points)
metric.interpolate()
ans = metric.calculate_auc_pr()
return ans
@property
def name(self):
return 'AUC-PR (Davis)'
class AUCPRSciKit(RankingEvaluationMetric):
"""
Area Under the Precision-Recall Curve (AUC-PR), calculated using scikit-learn.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, y, scores):
scores = self._preprocess_scores(scores)
precision, recall, thresholds = metrics.precision_recall_curve(y, scores, pos_label=self.pos_label)
ans = metrics.auc(recall, precision)
return ans
@property
def name(self):
return 'AUC-PR (scikit-learn)'
class AUCROCDavis(RankingEvaluationMetric):
"""
Area Under the ROC Curve (AUC-ROC), calculated using the procedure described in [1].
[1] J Davis et al. - The Relationship Between Precision-Recall and ROC Curves - ICML 2006
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, y, scores):
scores = self._preprocess_scores(scores)
n, n_pos = len(scores), np.sum(y == self.pos_label)
metric = davis.AUC(n_pos, n - n_pos)
order = np.argsort(scores)[::-1]
ordered_y = y[order]
_tp = np.sum(y == self.pos_label)
pn_points = []
for i in reversed(range(y.shape[0])):
_y = ordered_y[:i + 1]
n = _y.shape[0]
_tp -= 1 if (i + 1) < ordered_y.shape[0] and ordered_y[i + 1] == self.pos_label else 0
fp = n - _tp
point = davis.PNPoint(_tp, fp)
pn_points += [point]
metric.set_pn_points(pn_points)
metric.interpolate()
ans = metric.calculate_auc_roc()
return ans
@property
def name(self):
return 'AUC-ROC (Davis)'
class AUCROCSciKit(RankingEvaluationMetric):
"""
Area Under the Precision-Recall Curve (AUC-PR), calculated using scikit-learn.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, y, scores):
scores = self._preprocess_scores(scores)
ans = metrics.roc_auc_score((y == self.pos_label).astype(int), scores)
return ans
@property
def name(self):
return 'AUC-ROC (scikit-learn)'
AUCPR = AUCPRSciKit
AUCROC = AUCROCSciKit
class HitsAtK(RankingEvaluationMetric):
"""
Hits@K: Number of correct elements retrieved among the K elements with the
highest score.
"""
def __init__(self, k=10, *args, **kwargs):
super().__init__(*args, **kwargs)
self.k = k
def __call__(self, y, scores):
scores = self._preprocess_scores(scores)
k = len(y) if self.k is None else self.k
return (y[np.argsort(scores)[::-1]][:k] == self.pos_label).sum()
@property
def name(self):
return 'Hits@%d' % self.k
class PrecisionAtK(RankingEvaluationMetric):
"""
Precision@K [1]: Fraction of relevant elements retrieved among the K elements with the highest score.
[1] T Y Liu - Learning to Rank for Information Retrieval - Springer 2011
"""
def __init__(self, k=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.k = k
def __call__(self, y, scores):
scores = self._preprocess_scores(scores)
order = np.argsort(scores)[::-1]
n_pos = np.sum(y == self.pos_label)
k = len(y) if self.k is None else self.k
n_relevant = np.sum(y[order[:k]] == self.pos_label)
        return float(n_relevant) / min(n_pos, k)  # use the resolved k so k=None (full list) works
@property
def name(self):
return 'Precision' + (('@%s' % self.k) if self.k is not None else '')
class AveragePrecision(RankingEvaluationMetric):
"""
Average Precision [1]
[1] T Y Liu - Learning to Rank for Information Retrieval - Springer 2011
"""
def __init__(self, k=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.k = k
def __call__(self, y, scores):
scores = self._preprocess_scores(scores)
order = np.argsort(scores)[::-1]
k = len(y) if self.k is None else self.k
_y, _order = y[:k], order[:k]
n, ord_y = _y.shape[0], _y[_order]
num, n_pos = .0, 0
for i in range(n):
n_pos += 1 if ord_y[i] == self.pos_label else 0
num += (n_pos / (i + 1)) if ord_y[i] == self.pos_label else .0
return num / n_pos
@property
def name(self):
return 'Average Precision' + (('@%s' % self.k) if self.k is not None else '')
class DCG(RankingEvaluationMetric):
"""
Discounted Cumulative Gain [1]
[1] T Y Liu - Learning to Rank for Information Retrieval - Springer 2011
"""
def __init__(self,
k=None,
G=lambda x: 2 ** x - 1,
eta=lambda x: 1 / math.log2(x + 1),
*args, **kwargs):
super().__init__(*args, **kwargs)
self.k = k
self.G = G
self.eta = eta
def __call__(self, y, scores):
scores = self._preprocess_scores(scores)
order = np.argsort(scores)[::-1]
ord_y, dcg = y[order], .0
k = len(y) if self.k is None else self.k
for i in range(k):
dcg += self.G(ord_y[i] == self.pos_label) * self.eta(i + 1)
return dcg
@property
def name(self):
return 'DCG' + (('@%s' % self.k) if self.k is not None else '')
class NDCG(RankingEvaluationMetric):
"""
Normalized Discounted Cumulative Gain [1]
[1] T Y Liu - Learning to Rank for Information Retrieval - Springer 2011
"""
def __init__(self,
k=None,
G=lambda x: 2 ** x - 1,
eta=lambda x: 1 / math.log2(x + 1),
*args, **kwargs):
super().__init__(*args, **kwargs)
self.k = k
self.dcg = DCG(k=self.k, G=G, eta=eta, *args, **kwargs)
def __call__(self, y, scores):
dcg_score = self.dcg(y, scores)
normalization_term = self.dcg(y, y)
return dcg_score / normalization_term
@property
def name(self):
return 'NDCG' + (('@%s' % self.k) if self.k is not None else '')
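# --- Illustrative usage sketch (added; the toy labels and scores are made up) ---
# Every metric above shares the same interface: metric(y, scores), where `y`
# holds binary relevance labels and `scores` the predicted ranking scores.
if __name__ == '__main__':
    _y = np.array([1, 0, 1, 1, 0, 0])
    _scores = np.array([0.9, 0.8, 0.7, 0.3, 0.2, 0.1])
    for _metric in [AUCROC(), AUCPR(), HitsAtK(k=3), PrecisionAtK(k=3),
                    AveragePrecision(), DCG(), NDCG()]:
        print(_metric.name, _metric(_y, _scores))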
| mit |
RPGOne/scikit-learn | examples/applications/plot_prediction_latency.py | 85 | 11395 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
cls_names : list of estimator class names that generated the runtimes
pred_type : 'bulk' or 'atomic'
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
dsm054/pandas | asv_bench/benchmarks/algorithms.py | 3 | 3226 | import warnings
from importlib import import_module
import numpy as np
import pandas as pd
from pandas.util import testing as tm
for imp in ['pandas.util', 'pandas.tools.hashing']:
try:
hashing = import_module(imp)
break
except (ImportError, TypeError, ValueError):
pass
class Factorize(object):
params = [True, False]
param_names = ['sort']
def setup(self, sort):
N = 10**5
self.int_idx = pd.Int64Index(np.arange(N).repeat(5))
self.float_idx = pd.Float64Index(np.random.randn(N).repeat(5))
self.string_idx = tm.makeStringIndex(N)
def time_factorize_int(self, sort):
self.int_idx.factorize(sort=sort)
def time_factorize_float(self, sort):
self.float_idx.factorize(sort=sort)
def time_factorize_string(self, sort):
self.string_idx.factorize(sort=sort)
class Duplicated(object):
params = ['first', 'last', False]
param_names = ['keep']
def setup(self, keep):
N = 10**5
self.int_idx = pd.Int64Index(np.arange(N).repeat(5))
self.float_idx = pd.Float64Index(np.random.randn(N).repeat(5))
self.string_idx = tm.makeStringIndex(N)
def time_duplicated_int(self, keep):
self.int_idx.duplicated(keep=keep)
def time_duplicated_float(self, keep):
self.float_idx.duplicated(keep=keep)
def time_duplicated_string(self, keep):
self.string_idx.duplicated(keep=keep)
class DuplicatedUniqueIndex(object):
def setup(self):
N = 10**5
self.idx_int_dup = pd.Int64Index(np.arange(N * 5))
# cache is_unique
self.idx_int_dup.is_unique
def time_duplicated_unique_int(self):
self.idx_int_dup.duplicated()
class Match(object):
def setup(self):
self.uniques = tm.makeStringIndex(1000).values
self.all = self.uniques.repeat(10)
def time_match_string(self):
with warnings.catch_warnings(record=True):
pd.match(self.all, self.uniques)
class Hashing(object):
def setup_cache(self):
N = 10**5
df = pd.DataFrame(
{'strings': pd.Series(tm.makeStringIndex(10000).take(
np.random.randint(0, 10000, size=N))),
'floats': np.random.randn(N),
'ints': np.arange(N),
'dates': pd.date_range('20110101', freq='s', periods=N),
'timedeltas': pd.timedelta_range('1 day', freq='s', periods=N)})
df['categories'] = df['strings'].astype('category')
df.iloc[10:20] = np.nan
return df
def time_frame(self, df):
hashing.hash_pandas_object(df)
def time_series_int(self, df):
hashing.hash_pandas_object(df['ints'])
def time_series_string(self, df):
hashing.hash_pandas_object(df['strings'])
def time_series_float(self, df):
hashing.hash_pandas_object(df['floats'])
def time_series_categorical(self, df):
hashing.hash_pandas_object(df['categories'])
def time_series_timedeltas(self, df):
hashing.hash_pandas_object(df['timedeltas'])
def time_series_dates(self, df):
hashing.hash_pandas_object(df['dates'])
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
JamesJeffryes/MINE-Database | Scripts/model_jacquard.py | 1 | 1130 | import pandas
import seaborn
import matplotlib.pyplot as plt
import numpy
from minedatabase.databases import MINE
import sys
db = MINE(sys.argv[1])
fields = ['Compounds', 'Compound_ids', 'Reactions', 'Operators']
def pw_jaccard(series, reduce=numpy.median):
pw = []
for i, x in enumerate(series):
tc = []
for j, y in enumerate(series):
if i != j:
tc.append(len(x & y) / float(len(x | y)))
pw.append(reduce(tc))
return pw
keys = {}
results = []
for model in db.models.find():
results.append([model['_id']]+[set([y[0] for y in model[x]])
if isinstance(model[x][0], list)
else set(model[x]) for x in fields])
sets = pandas.DataFrame(results, columns=['_id']+fields).set_index('_id')
sets.to_csv("model_sets.csv")
tcs = sets.apply(pw_jaccard)
tcs.to_csv("model_pairwise_jaccard.csv")
results = pandas.DataFrame.from_csv("model_pairwise_jaccard.csv",
index_col='_id')
seaborn.boxplot(data=results)
plt.tight_layout()
plt.savefig("%s_boxplots.png" % db.name) | mit |
camallen/aggregation | algorithms/blanks/user_analysis.py | 2 | 2084 | #!/usr/bin/env python
__author__ = 'greg'
from cassandra.cluster import Cluster
import numpy
import matplotlib.pyplot as plt
import datetime
import csv
import bisect
import random
import json
import matplotlib.pyplot as plt
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect.bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
# load subject data from CSV
subjects_index = {}
with open('/home/greg/Documents/subject_species_all.csv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
subjects_index[row[1]] = row[2]
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
def unix_time_millis(dt):
return long(unix_time(dt) * 1000.0)
cluster = Cluster()
cassandra_session = cluster.connect('serengeti')
ips = []
for ii,row in enumerate(cassandra_session.execute("select * from classifications where id =1")):
try:
index(ips,row.user_ip)
except ValueError:
bisect.insort(ips,row.user_ip)
# ips.add(row.user_ip)
if ii == 100000:
break
animal_accuracy = []
for ip in random.sample(ips,500):
true_blank = 0.
false_blank = 0.
true_animal = 0.
false_animal = 0.
for classification in cassandra_session.execute("select * from ip_classifications where id =1 and user_ip='"+str(ip)+"'"):
zooniverse_id = classification.zooniverse_id
annotatons = json.loads(classification.annotations)
nothing = "nothing" in annotatons[-1]
if subjects_index[zooniverse_id]=="blank":
if nothing:
true_blank += 1
else:
false_animal += 1
else:
if nothing:
false_blank += 1
else:
true_animal += 1
if (true_animal+false_blank) == 0:
continue
animal_accuracy.append(true_animal/(true_animal+false_blank))
plt.hist(animal_accuracy,50,cumulative=True,normed=1)
plt.show() | apache-2.0 |
saketkc/statsmodels | statsmodels/stats/outliers_influence.py | 27 | 25639 | # -*- coding: utf-8 -*-
"""Influence and Outlier Measures
Created on Sun Jan 29 11:16:09 2012
Author: Josef Perktold
License: BSD-3
"""
from statsmodels.compat.python import lzip
from collections import defaultdict
import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.decorators import cache_readonly
from statsmodels.stats.multitest import multipletests
from statsmodels.tools.tools import maybe_unwrap_results
# outliers test convenience wrapper
def outlier_test(model_results, method='bonf', alpha=.05, labels=None,
order=False):
"""
Outlier Tests for RegressionResults instances.
Parameters
----------
model_results : RegressionResults instance
Linear model results
method : str
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
familywise error rate
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be sorted.
Returns
-------
table : ndarray or DataFrame
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
"""
from scipy import stats # lazy import
infl = getattr(model_results, 'get_influence', None)
if infl is None:
results = maybe_unwrap_results(model_results)
raise AttributeError("model_results object %s does not have a "
"get_influence method." % results.__class__.__name__)
resid = infl().resid_studentized_external
if order:
idx = np.abs(resid).argsort()[::-1]
resid = resid[idx]
if labels is not None:
labels = np.array(labels)[idx].tolist()
df = model_results.df_resid - 1
unadj_p = stats.t.sf(np.abs(resid), df) * 2
adj_p = multipletests(unadj_p, alpha=alpha, method=method)
data = np.c_[resid, unadj_p, adj_p[1]]
if labels is None:
labels = getattr(model_results.model.data, 'row_labels', None)
if labels is not None:
from pandas import DataFrame
return DataFrame(data,
columns=['student_resid', 'unadj_p', method+"(p)"],
index=labels)
return data
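# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way `outlier_test`
# might be called on a fitted OLS model. The simulated data and names below
# are assumptions made purely for demonstration.
def _example_outlier_test():
    rng = np.random.RandomState(0)
    exog = np.column_stack([np.ones(50), rng.normal(size=50)])
    endog = exog[:, 1] + rng.normal(scale=0.5, size=50)
    endog[0] += 5.0                      # plant a single outlier
    res = OLS(endog, exog).fit()
    # Bonferroni-corrected test; rows are observations, columns are the
    # studentized residual, the unadjusted p-value and the adjusted p-value.
    return outlier_test(res, method='bonf', alpha=0.05)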
#influence measures
def reset_ramsey(res, degree=5):
'''Ramsey's RESET specification test for linear models
This is a general specification test, for additional non-linear effects
in a model.
Notes
-----
The test fits an auxiliary OLS regression where the design matrix, exog,
is augmented by powers 2 to degree of the fitted values. Then it performs
an F-test whether these additional terms are significant.
If the p-value of the f-test is below a threshold, e.g. 0.1, then this
indicates that there might be additional non-linear effects in the model
and that the linear model is mis-specified.
References
----------
http://en.wikipedia.org/wiki/Ramsey_RESET_test
'''
order = degree + 1
k_vars = res.model.exog.shape[1]
#vander without constant and x:
y_fitted_vander = np.vander(res.fittedvalues, order)[:, :-2] #drop constant
exog = np.column_stack((res.model.exog, y_fitted_vander))
res_aux = OLS(res.model.endog, exog).fit()
#r_matrix = np.eye(degree, exog.shape[1], k_vars)
r_matrix = np.eye(degree-1, exog.shape[1], k_vars)
#df1 = degree - 1
#df2 = exog.shape[0] - degree - res.df_model (without constant)
return res_aux.f_test(r_matrix) #, r_matrix, res_aux
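# Illustrative sketch (not part of the original module): applying the RESET
# test to a deliberately mis-specified fit; a small p-value hints at omitted
# non-linear terms. The simulated data below is an assumption for
# demonstration only.
def _example_reset_ramsey():
    rng = np.random.RandomState(1)
    x = rng.uniform(0., 3., size=200)
    y = 1. + 2. * x + 0.5 * x**2 + rng.normal(scale=0.2, size=200)
    exog = np.column_stack([np.ones_like(x), x])   # quadratic term left out
    res = OLS(y, exog).fit()
    ftest = reset_ramsey(res, degree=3)
    return ftest.pvalue                            # expected to be tiny here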
def variance_inflation_factor(exog, exog_idx):
'''variance inflation factor, VIF, for one exogenous variable
The variance inflation factor is a measure for the increase of the
variance of the parameter estimates if an additional variable, given by
exog_idx is added to the linear regression. It is a measure for
multicollinearity of the design matrix, exog.
One recommendation is that if VIF is greater than 5, then the explanatory
variable given by exog_idx is highly collinear with the other explanatory
variables, and the parameter estimates will have large standard errors
because of this.
Parameters
----------
exog : ndarray, (nobs, k_vars)
design matrix with all explanatory variables, as for example used in
regression
exog_idx : int
index of the exogenous variable in the columns of exog
Returns
-------
vif : float
variance inflation factor
Notes
-----
This function does not save the auxiliary regression.
See Also
--------
xxx : class for regression diagnostics TODO: doesn't exist yet
References
----------
http://en.wikipedia.org/wiki/Variance_inflation_factor
'''
k_vars = exog.shape[1]
x_i = exog[:, exog_idx]
mask = np.arange(k_vars) != exog_idx
x_noti = exog[:, mask]
r_squared_i = OLS(x_i, x_noti).fit().rsquared
vif = 1. / (1. - r_squared_i)
return vif
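# Illustrative sketch (not part of the original module): computing a VIF for
# every non-constant column of a design matrix. Data and names are
# assumptions for demonstration only.
def _example_variance_inflation_factor():
    rng = np.random.RandomState(2)
    x1 = rng.normal(size=100)
    x2 = x1 + rng.normal(scale=0.1, size=100)      # nearly collinear with x1
    x3 = rng.normal(size=100)
    exog = np.column_stack([np.ones(100), x1, x2, x3])
    # VIFs for x1 and x2 should come out large, the one for x3 close to 1
    return [variance_inflation_factor(exog, i) for i in range(1, exog.shape[1])]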
class OLSInfluence(object):
'''class to calculate outlier and influence measures for OLS result
Parameters
----------
results : Regression Results instance
currently assumes the results are from an OLS regression
Notes
-----
    One part of the results can be calculated without any auxiliary regression
    (some of these have the `_internal` postfix in the name). Other statistics
    require a leave-one-observation-out (LOOO) auxiliary regression, and will
    be slower (mainly results with the `_external` postfix in the name).
    For the auxiliary LOOO regressions, only the required results are stored.
Using the LOO measures is currently only recommended if the data set
is not too large. One possible approach for LOOO measures would be to
identify possible problem observations with the _internal measures, and
then run the leave-one-observation-out only with observations that are
    possible outliers. (However, this is not yet available in an automated way.)
This should be extended to general least squares.
    The leave-one-variable-out (LOVO) auxiliary regressions are currently not
    used.
'''
def __init__(self, results):
#check which model is allowed
self.results = maybe_unwrap_results(results)
self.nobs, self.k_vars = results.model.exog.shape
self.endog = results.model.endog
self.exog = results.model.exog
self.model_class = results.model.__class__
self.sigma_est = np.sqrt(results.mse_resid)
self.aux_regression_exog = {}
self.aux_regression_endog = {}
@cache_readonly
def hat_matrix_diag(self):
'''(cached attribute) diagonal of the hat_matrix for OLS
Notes
-----
temporarily calculated here, this should go to model class
'''
return (self.exog * self.results.model.pinv_wexog.T).sum(1)
@cache_readonly
def resid_press(self):
'''(cached attribute) PRESS residuals
'''
hii = self.hat_matrix_diag
return self.results.resid / (1 - hii)
@cache_readonly
def influence(self):
'''(cached attribute) influence measure
matches the influence measure that gretl reports
u * h / (1 - h)
where u are the residuals and h is the diagonal of the hat_matrix
'''
hii = self.hat_matrix_diag
return self.results.resid * hii / (1 - hii)
@cache_readonly
def hat_diag_factor(self):
'''(cached attribute) factor of diagonal of hat_matrix used in influence
this might be useful for internal reuse
h / (1 - h)
'''
hii = self.hat_matrix_diag
return hii / (1 - hii)
@cache_readonly
def ess_press(self):
'''(cached attribute) error sum of squares of PRESS residuals
'''
return np.dot(self.resid_press, self.resid_press)
@cache_readonly
def resid_studentized_internal(self):
'''(cached attribute) studentized residuals using variance from OLS
this uses sigma from original estimate
does not require leave one out loop
'''
return self.get_resid_studentized_external(sigma=None)
#return self.results.resid / self.sigma_est
@cache_readonly
def resid_studentized_external(self):
'''(cached attribute) studentized residuals using LOOO variance
this uses sigma from leave-one-out estimates
requires leave one out loop for observations
'''
sigma_looo = np.sqrt(self.sigma2_not_obsi)
return self.get_resid_studentized_external(sigma=sigma_looo)
def get_resid_studentized_external(self, sigma=None):
'''calculate studentized residuals
Parameters
----------
sigma : None or float
estimate of the standard deviation of the residuals. If None, then
the estimate from the regression results is used.
Returns
-------
stzd_resid : ndarray
studentized residuals
Notes
-----
studentized residuals are defined as ::
resid / sigma / np.sqrt(1 - hii)
where resid are the residuals from the regression, sigma is an
estimate of the standard deviation of the residuals, and hii is the
diagonal of the hat_matrix.
'''
hii = self.hat_matrix_diag
if sigma is None:
sigma2_est = self.results.mse_resid
#can be replace by different estimators of sigma
sigma = np.sqrt(sigma2_est)
return self.results.resid / sigma / np.sqrt(1 - hii)
@cache_readonly
def dffits_internal(self):
'''(cached attribute) dffits measure for influence of an observation
based on resid_studentized_internal
uses original results, no nobs loop
'''
#TODO: do I want to use different sigma estimate in
# resid_studentized_external
# -> move definition of sigma_error to the __init__
hii = self.hat_matrix_diag
dffits_ = self.resid_studentized_internal * np.sqrt(hii / (1 - hii))
dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs)
return dffits_, dffits_threshold
@cache_readonly
def dffits(self):
'''(cached attribute) dffits measure for influence of an observation
based on resid_studentized_external,
uses results from leave-one-observation-out loop
        It is recommended that observations with dffits larger than a
        threshold of 2 * sqrt(k / n), where k is the number of parameters,
        should be investigated.
Returns
-------
dffits: float
dffits_threshold : float
References
----------
`Wikipedia <http://en.wikipedia.org/wiki/DFFITS>`_
'''
#TODO: do I want to use different sigma estimate in
# resid_studentized_external
# -> move definition of sigma_error to the __init__
hii = self.hat_matrix_diag
dffits_ = self.resid_studentized_external * np.sqrt(hii / (1 - hii))
dffits_threshold = 2 * np.sqrt(self.k_vars * 1. / self.nobs)
return dffits_, dffits_threshold
@cache_readonly
def dfbetas(self):
'''(cached attribute) dfbetas
uses results from leave-one-observation-out loop
'''
dfbetas = self.results.params - self.params_not_obsi#[None,:]
dfbetas /= np.sqrt(self.sigma2_not_obsi[:,None])
dfbetas /= np.sqrt(np.diag(self.results.normalized_cov_params))
return dfbetas
@cache_readonly
def sigma2_not_obsi(self):
'''(cached attribute) error variance for all LOOO regressions
This is 'mse_resid' from each auxiliary regression.
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['mse_resid'])
@cache_readonly
def params_not_obsi(self):
'''(cached attribute) parameter estimates for all LOOO regressions
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['params'])
@cache_readonly
def det_cov_params_not_obsi(self):
'''(cached attribute) determinant of cov_params of all LOOO regressions
uses results from leave-one-observation-out loop
'''
return np.asarray(self._res_looo['det_cov_params'])
@cache_readonly
def cooks_distance(self):
'''(cached attribute) Cooks distance
uses original results, no nobs loop
'''
hii = self.hat_matrix_diag
#Eubank p.93, 94
cooks_d2 = self.resid_studentized_internal**2 / self.k_vars
cooks_d2 *= hii / (1 - hii)
from scipy import stats
#alpha = 0.1
#print stats.f.isf(1-alpha, n_params, res.df_modelwc)
pvals = stats.f.sf(cooks_d2, self.k_vars, self.results.df_resid)
return cooks_d2, pvals
@cache_readonly
def cov_ratio(self):
'''(cached attribute) covariance ratio between LOOO and original
This uses determinant of the estimate of the parameter covariance
from leave-one-out estimates.
requires leave one out loop for observations
'''
#don't use inplace division / because then we change original
cov_ratio = (self.det_cov_params_not_obsi
/ np.linalg.det(self.results.cov_params()))
return cov_ratio
@cache_readonly
def resid_var(self):
'''(cached attribute) estimate of variance of the residuals
::
sigma2 = sigma2_OLS * (1 - hii)
where hii is the diagonal of the hat matrix
'''
#TODO:check if correct outside of ols
return self.results.mse_resid * (1 - self.hat_matrix_diag)
@cache_readonly
def resid_std(self):
'''(cached attribute) estimate of standard deviation of the residuals
See Also
--------
resid_var
'''
return np.sqrt(self.resid_var)
def _ols_xnoti(self, drop_idx, endog_idx='endog', store=True):
'''regression results from LOVO auxiliary regression with cache
The result instances are stored, which could use a large amount of
memory if the datasets are large. There are too many combinations to
store them all, except for small problems.
Parameters
----------
drop_idx : int
index of exog that is dropped from the regression
endog_idx : 'endog' or int
If 'endog', then the endogenous variable of the result instance
is regressed on the exogenous variables, excluding the one at
drop_idx. If endog_idx is an integer, then the exog with that
index is regressed with OLS on all other exogenous variables.
(The latter is the auxiliary regression for the variance inflation
factor.)
this needs more thought, memory versus speed
not yet used in any other parts, not sufficiently tested
'''
#reverse the structure, access store, if fail calculate ?
#this creates keys in store even if store = false ! bug
if endog_idx == 'endog':
stored = self.aux_regression_endog
            if drop_idx in stored:
return stored[drop_idx]
x_i = self.results.model.endog
else:
#nested dictionary
            try:
                return self.aux_regression_exog[endog_idx][drop_idx]
            except KeyError:
                pass
            stored = self.aux_regression_exog.setdefault(endog_idx, {})
x_i = self.exog[:, endog_idx]
k_vars = self.exog.shape[1]
mask = np.arange(k_vars) != drop_idx
x_noti = self.exog[:, mask]
res = OLS(x_i, x_noti).fit()
if store:
stored[drop_idx] = res
return res
def _get_drop_vari(self, attributes):
'''regress endog on exog without one of the variables
This uses a k_vars loop, only attributes of the OLS instance are stored.
Parameters
----------
attributes : list of strings
These are the names of the attributes of the auxiliary OLS results
instance that are stored and returned.
not yet used
'''
from statsmodels.sandbox.tools.cross_val import LeaveOneOut
endog = self.results.model.endog
exog = self.exog
cv_iter = LeaveOneOut(self.k_vars)
res_loo = defaultdict(list)
for inidx, outidx in cv_iter:
for att in attributes:
res_i = self.model_class(endog, exog[:,inidx]).fit()
res_loo[att].append(getattr(res_i, att))
return res_loo
@cache_readonly
def _res_looo(self):
'''collect required results from the LOOO loop
all results will be attached.
currently only 'params', 'mse_resid', 'det_cov_params' are stored
regresses endog on exog dropping one observation at a time
this uses a nobs loop, only attributes of the OLS instance are stored.
'''
from statsmodels.sandbox.tools.cross_val import LeaveOneOut
get_det_cov_params = lambda res: np.linalg.det(res.cov_params())
endog = self.endog
exog = self.exog
params = np.zeros(exog.shape, dtype=np.float)
mse_resid = np.zeros(endog.shape, dtype=np.float)
det_cov_params = np.zeros(endog.shape, dtype=np.float)
cv_iter = LeaveOneOut(self.nobs)
for inidx, outidx in cv_iter:
res_i = self.model_class(endog[inidx], exog[inidx]).fit()
params[outidx] = res_i.params
mse_resid[outidx] = res_i.mse_resid
det_cov_params[outidx] = get_det_cov_params(res_i)
return dict(params=params, mse_resid=mse_resid,
det_cov_params=det_cov_params)
def summary_frame(self):
"""
Creates a DataFrame with all available influence results.
Returns
-------
frame : DataFrame
A DataFrame with all results.
Notes
-----
The resultant DataFrame contains six variables in addition to the
DFBETAS. These are:
* cooks_d : Cook's Distance defined in `Influence.cooks_distance`
* standard_resid : Standardized residuals defined in
`Influence.resid_studentized_internal`
* hat_diag : The diagonal of the projection, or hat, matrix defined in
`Influence.hat_matrix_diag`
* dffits_internal : DFFITS statistics using internally Studentized
residuals defined in `Influence.dffits_internal`
* dffits : DFFITS statistics using externally Studentized residuals
defined in `Influence.dffits`
* student_resid : Externally Studentized residuals defined in
`Influence.resid_studentized_external`
"""
from pandas import DataFrame
# row and column labels
data = self.results.model.data
row_labels = data.row_labels
beta_labels = ['dfb_' + i for i in data.xnames]
# grab the results
summary_data = DataFrame(dict(
cooks_d = self.cooks_distance[0],
standard_resid = self.resid_studentized_internal,
hat_diag = self.hat_matrix_diag,
dffits_internal = self.dffits_internal[0],
student_resid = self.resid_studentized_external,
dffits = self.dffits[0],
),
index = row_labels)
#NOTE: if we don't give columns, order of above will be arbitrary
dfbeta = DataFrame(self.dfbetas, columns=beta_labels,
index=row_labels)
return dfbeta.join(summary_data)
def summary_table(self, float_fmt="%6.3f"):
'''create a summary table with all influence and outlier measures
        This currently does not distinguish between statistics that can be
        calculated from the original regression results and those for which a
        leave-one-observation-out loop is needed.
Returns
-------
res : SimpleTable instance
SimpleTable instance with the results, can be printed
Notes
-----
This also attaches table_data to the instance.
'''
#print self.dfbetas
# table_raw = [ np.arange(self.nobs),
# self.endog,
# self.fittedvalues,
# self.cooks_distance(),
# self.resid_studentized_internal,
# self.hat_matrix_diag,
# self.dffits_internal,
# self.resid_studentized_external,
# self.dffits,
# self.dfbetas
# ]
table_raw = [ ('obs', np.arange(self.nobs)),
('endog', self.endog),
('fitted\nvalue', self.results.fittedvalues),
("Cook's\nd", self.cooks_distance[0]),
("student.\nresidual", self.resid_studentized_internal),
('hat diag', self.hat_matrix_diag),
('dffits \ninternal', self.dffits_internal[0]),
("ext.stud.\nresidual", self.resid_studentized_external),
('dffits', self.dffits[0])
]
colnames, data = lzip(*table_raw) #unzip
data = np.column_stack(data)
self.table_data = data
from statsmodels.iolib.table import SimpleTable, default_html_fmt
from statsmodels.iolib.tableformatting import fmt_base
from copy import deepcopy
fmt = deepcopy(fmt_base)
fmt_html = deepcopy(default_html_fmt)
fmt['data_fmts'] = ["%4d"] + [float_fmt] * (data.shape[1] - 1)
#fmt_html['data_fmts'] = fmt['data_fmts']
return SimpleTable(data, headers=colnames, txt_fmt=fmt,
html_fmt=fmt_html)
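# Illustrative sketch (not part of the original module): typical use of
# OLSInfluence on a fitted OLS model. The simulated data is an assumption for
# demonstration only.
def _example_ols_influence():
    rng = np.random.RandomState(3)
    exog = np.column_stack([np.ones(40), rng.normal(size=40)])
    endog = exog[:, 1] + rng.normal(scale=0.3, size=40)
    res = OLS(endog, exog).fit()
    infl = OLSInfluence(res)
    cooks_d, cooks_pvals = infl.cooks_distance     # no LOOO loop required
    frame = infl.summary_frame()                   # triggers the LOOO loop
    return cooks_d.max(), frame.shape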
def summary_table(res, alpha=0.05):
'''generate summary table of outlier and influence similar to SAS
Parameters
----------
alpha : float
significance level for confidence interval
Returns
-------
st : SimpleTable instance
table with results that can be printed
data : ndarray
calculated measures and statistics for the table
ss2 : list of strings
column_names for table (Note: rows of table are observations)
'''
from scipy import stats
from statsmodels.sandbox.regression.predstd import wls_prediction_std
infl = OLSInfluence(res)
#standard error for predicted mean
#Note: using hat_matrix only works for fitted values
predict_mean_se = np.sqrt(infl.hat_matrix_diag*res.mse_resid)
tppf = stats.t.isf(alpha/2., res.df_resid)
predict_mean_ci = np.column_stack([
res.fittedvalues - tppf * predict_mean_se,
res.fittedvalues + tppf * predict_mean_se])
#standard error for predicted observation
predict_se, predict_ci_low, predict_ci_upp = wls_prediction_std(res)
predict_ci = np.column_stack((predict_ci_low, predict_ci_upp))
#standard deviation of residual
resid_se = np.sqrt(res.mse_resid * (1 - infl.hat_matrix_diag))
table_sm = np.column_stack([
np.arange(res.nobs) + 1,
res.model.endog,
res.fittedvalues,
predict_mean_se,
predict_mean_ci[:,0],
predict_mean_ci[:,1],
predict_ci[:,0],
predict_ci[:,1],
res.resid,
resid_se,
infl.resid_studentized_internal,
infl.cooks_distance[0]
])
#colnames, data = lzip(*table_raw) #unzip
data = table_sm
ss2 = ['Obs', 'Dep Var\nPopulation', 'Predicted\nValue', 'Std Error\nMean Predict', 'Mean ci\n95% low', 'Mean ci\n95% upp', 'Predict ci\n95% low', 'Predict ci\n95% upp', 'Residual', 'Std Error\nResidual', 'Student\nResidual', "Cook's\nD"]
colnames = ss2
#self.table_data = data
#data = np.column_stack(data)
from statsmodels.iolib.table import SimpleTable, default_html_fmt
from statsmodels.iolib.tableformatting import fmt_base
from copy import deepcopy
fmt = deepcopy(fmt_base)
fmt_html = deepcopy(default_html_fmt)
fmt['data_fmts'] = ["%4d"] + ["%6.3f"] * (data.shape[1] - 1)
#fmt_html['data_fmts'] = fmt['data_fmts']
st = SimpleTable(data, headers=colnames, txt_fmt=fmt,
html_fmt=fmt_html)
return st, data, ss2
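# Illustrative sketch (not part of the original module): printing the
# SAS-style per-observation table for a fitted OLS model. The simulated data
# is an assumption for demonstration only.
def _example_summary_table():
    rng = np.random.RandomState(4)
    exog = np.column_stack([np.ones(30), rng.normal(size=30)])
    endog = 2. + exog[:, 1] + rng.normal(scale=0.5, size=30)
    res = OLS(endog, exog).fit()
    st, data, ss2 = summary_table(res, alpha=0.05)
    print(st)                     # SimpleTable renders as plain text
    return data.shape, ss2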
| bsd-3-clause |
lbishal/scikit-learn | sklearn/covariance/tests/test_covariance.py | 34 | 11120 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert_greater(np.amin(mahal_dist), 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
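# Illustrative sketch (not part of the original test module): the closed-form
# target used by ``shrunk_covariance`` written out by hand,
# (1 - alpha) * S + alpha * (trace(S) / p) * I. The helper name is an
# assumption for demonstration; the result should match
# shrunk_covariance(empirical_covariance(data), shrinkage=alpha).
def _manual_shrunk_covariance(data, alpha=0.5):
    emp_cov = empirical_covariance(data)
    p = emp_cov.shape[0]
    mu = np.trace(emp_cov) / p
    shrunk = (1. - alpha) * emp_cov
    shrunk.flat[::p + 1] += alpha * mu      # add alpha * mu to the diagonal
    return shrunk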
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
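# Illustrative sketch (not part of the original test module): fitting both
# shrinkage estimators on the same data and comparing the shrinkage
# coefficients they select. The helper name is an assumption for
# demonstration only.
def _compare_shrinkage_estimators(data):
    lw = LedoitWolf().fit(data)
    oa = OAS().fit(data)
    return {'ledoit_wolf': lw.shrinkage_, 'oas': oa.shrinkage_}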
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
throwable-one/lettuce | tests/integration/django/dill/leaves/features/steps.py | 17 | 1432 | import json
from django.core.management import call_command
from leaves.models import *
from lettuce import after, step
from lettuce.django.steps.models import *
from nose.tools import assert_equals
after.each_scenario(clean_db)
max_rego = 0
@creates_models(Harvester)
def create_with_rego(step):
data = hashes_data(step)
for hash_ in data:
hash_['rego'] = hash_['make'][:3].upper() + "001"
create_models(Harvester, data)
@checks_existence(Harvester)
def check_with_rego(step):
data = hashes_data(step)
for hash_ in data:
try:
hash_['rego'] = hash_['rego'].upper()
except KeyError:
pass
models_exist(Harvester, data)
@step(r'The database dump is as follows')
def database_dump(step):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
output = StringIO()
call_command('dumpdata', stdout=output, indent=2)
output = output.getvalue()
assert_equals(json.loads(output), json.loads(step.multiline))
@step(r'I have populated the database')
def database_populated(step):
pass
@step(r'I count the harvesters')
def count_harvesters(step):
print "Harvester count: %d" % Harvester.objects.count()
@creates_models(Panda)
def create_pandas(step):
data = hashes_data(step)
    for hash_ in data:
        if 'name' in hash_:
            hash_['name'] += ' Panda'
return create_models(Panda, data)
| gpl-3.0 |
WarrenWeckesser/scipy | doc/source/tutorial/examples/newton_krylov_preconditioning.py | 36 | 2492 | import numpy as np
from scipy.optimize import root
from scipy.sparse import spdiags, kron
from scipy.sparse.linalg import spilu, LinearOperator
from numpy import cosh, zeros_like, mgrid, zeros, eye
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def get_preconditioner():
"""Compute the preconditioner M"""
diags_x = zeros((3, nx))
diags_x[0,:] = 1/hx/hx
diags_x[1,:] = -2/hx/hx
diags_x[2,:] = 1/hx/hx
Lx = spdiags(diags_x, [-1,0,1], nx, nx)
diags_y = zeros((3, ny))
diags_y[0,:] = 1/hy/hy
diags_y[1,:] = -2/hy/hy
diags_y[2,:] = 1/hy/hy
Ly = spdiags(diags_y, [-1,0,1], ny, ny)
J1 = kron(Lx, eye(ny)) + kron(eye(nx), Ly)
# Now we have the matrix `J_1`. We need to find its inverse `M` --
# however, since an approximate inverse is enough, we can use
# the *incomplete LU* decomposition
J1_ilu = spilu(J1)
# This returns an object with a method .solve() that evaluates
# the corresponding matrix-vector product. We need to wrap it into
# a LinearOperator before it can be passed to the Krylov methods:
M = LinearOperator(shape=(nx*ny, nx*ny), matvec=J1_ilu.solve)
return M
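# A minimal, self-contained sketch (not part of the original example) of the
# same spilu + LinearOperator pattern, applied to a generic sparse system
# solved with GMRES. The matrix, its size and the helper name are assumptions
# made for demonstration only.
def _ilu_preconditioned_gmres_demo(n=50):
    from scipy.sparse import diags
    from scipy.sparse.linalg import gmres
    # a simple tridiagonal (1-D Laplacian-like) system in CSC format for spilu
    A = diags([1., -2., 1.], [-1, 0, 1], shape=(n, n), format='csc')
    b = np.ones(n)
    ilu = spilu(A)
    M = LinearOperator((n, n), matvec=ilu.solve)
    x, info = gmres(A, b, M=M)   # info == 0 signals successful convergence
    return x, info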
def solve(preconditioning=True):
"""Compute the solution"""
count = [0]
def residual(P):
count[0] += 1
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2])/hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y + 5*cosh(P).mean()**2
# preconditioner
if preconditioning:
M = get_preconditioner()
else:
M = None
# solve
guess = zeros((nx, ny), float)
sol = root(residual, guess, method='krylov',
options={'disp': True,
'jac_options': {'inner_M': M}})
print('Residual', abs(residual(sol.x)).max())
print('Evaluations', count[0])
return sol.x
def main():
sol = solve(preconditioning=True)
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.clf()
plt.pcolor(x, y, sol)
plt.clim(0, 1)
plt.colorbar()
plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
zctea/biocode | sandbox/jorvis/generate_expression_by_size_plot.py | 2 | 3201 | #!/usr/bin/env python3
"""
I was given data generated using this protocol:
http://trinityrnaseq.sourceforge.net/analysis/abundance_estimation.html
Specifically, this file:
RSEM.isoforms.results
OUTPUT
If you pass a value of 'plot' to the -o parameter it will invoke the interactive plot viewer rather
than writing an output file. (You can still save a file from within the viewer)
"""
import argparse
import biocodeutils
import os
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser( description='Generates a figure showing coverage/abundance vs. molecule size.')
## output file to be written
parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input pileup file' )
parser.add_argument('-o', '--output_file', type=str, required=True, help='Path to an output file to be created' )
parser.add_argument('-g', '--graph_using', type=str, required=False, default='TPM', help='TPM or FPKM' )
parser.add_argument('-t', '--title', type=str, required=False, help='Plot title' )
parser.add_argument('-m', '--max_size', type=int, required=False, help='Ignore transcripts over this size (limits X-axis)' )
parser.add_argument('-a', '--alpha_factor', type=float, required=False, default=0.05, help='Sets the opacity factor for overlapping dots')
args = parser.parse_args()
x = list()
y = list()
skipped_datapoints = 0
total_datapoints = 0
if args.graph_using not in ['TPM', 'FPKM']:
raise Exception("ERROR: --graph_using value must be either TPM or FPKM")
for line in open(args.input_file):
cols = line.split("\t")
if cols[0] == 'transcript_id' and cols[1] == 'gene_id':
continue
transcript_size = int(cols[2])
if args.max_size is not None and transcript_size > args.max_size:
continue
tpm = float(cols[5])
fpkm = float(cols[6])
total_datapoints += 1
if args.graph_using == 'TPM':
if tpm > 0:
x.append(transcript_size)
y.append(tpm)
else:
skipped_datapoints += 1
else:
if fpkm > 0:
x.append(transcript_size)
y.append(fpkm)
else:
skipped_datapoints += 1
if args.graph_using == 'TPM':
print("LOG: {0}/{1} data points were skipped because the TPM value was 0.0".format(skipped_datapoints, total_datapoints))
else:
print("LOG: {0}/{1} data points were skipped because the FPKM value was 0.0".format(skipped_datapoints, total_datapoints))
fig = plt.figure()
if args.title is not None:
fig.suptitle(args.title)
plt.xlabel('Molecule length')
if args.graph_using == 'TPM':
plt.ylabel('TPM')
else:
plt.ylabel('FPKM')
ax = plt.gca()
ax.plot(x, y, 'o', c='blue', alpha=args.alpha_factor, markeredgecolor='none')
ax.set_yscale('log')
if args.output_file == 'plot':
plt.show()
else:
plt.savefig(args.output_file)
if __name__ == '__main__':
main()
| gpl-3.0 |
dfm/exopop | code/main.py | 1 | 6854 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs the analysis on either the real catalog or a simulated one.
To run on the real data, call:
python main.py
and to run on a simulation, call:
python main.py path/to/the/simulated/catalog/
All of the MCMC tuning parameters are hard coded. Sorry!
"""
from __future__ import division, print_function
import os
import h5py
import triangle
import numpy as np
import cPickle as pickle
import matplotlib.pyplot as pl
from load_data import load_candidates, load_detection_efficiency
from population import ProbabilisticModel, Dataset, Population
def inverse_detection_efficiency(pop, censor, catalog, err, truth=None):
c = np.log(catalog)
ind = [np.digitize(x, b) for x, b in zip(c.T, censor.bins)]
weights = np.exp(-censor.lnprob[ind])
val, x, y = np.histogram2d(c[:, 0], c[:, 1], pop.bins, weights=weights)
var, x, y = np.histogram2d(c[:, 0], c[:, 1], pop.bins, weights=weights**2)
val[~np.isfinite(val)] = 0.0
var[~np.isfinite(var)] = 0.0
# Build the model for plotting.
v = pop.initial()
lg = np.log(val).flatten()
m = np.isfinite(lg)
lg[~m] = 0.0
v[-len(lg):] = lg
# Compute the marginalized errorbars.
marg = [np.sum(val, axis=(i+1) % 2) for i in range(2)]
norm = [np.sum(a * np.diff(pop.bins[i])) for i, a in enumerate(marg)]
literature = [(pop.bins[i], a/norm[i],
np.sqrt(np.sum(var, axis=(i+1) % 2))/norm[i])
for i, a in enumerate(marg)]
# Plot the results.
labels = ["$\ln T/\mathrm{day}$", "$\ln R/R_\oplus$"]
top_axes = ["$T\,[\mathrm{days}]$", "$R\,[R_\oplus]$"]
fig = pop.plot_2d(v, censor=censor, catalog=np.log(catalog),
err=err, true=truth, labels=labels,
top_axes=top_axes, literature=literature)
# Extrapolate to the Earth.
o = np.argsort(c[:, 0])
s = c[o, :]
ws = weights[o]
m = np.isfinite(ws) * (s[:, 1] <= np.log(2)) * (s[:, 1] >= np.log(1))
cs = np.cumsum(ws[m])
cs_var = np.cumsum(ws[m] ** 2)
i = s[m, 0] > np.log(50)
# Do the linear fit.
A = np.vander(s[m, 0][i], 2)
Cinv = np.diag(1.0 / cs_var[i])
S = np.linalg.inv(np.dot(A.T, np.dot(Cinv, A)))
mu = np.dot(S, np.dot(A.T, np.dot(Cinv, cs[i])))
# Compute the predictive value.
ys = np.dot(np.array([[np.log(200), 1],
[np.log(400), 1]]),
np.random.multivariate_normal(mu, S, 5000).T)
frac = np.diff(ys, axis=0)
q = triangle.quantile(frac, [0.16, 0.5, 0.84])
fig2 = pl.figure()
ax = fig2.add_subplot(111)
a = np.vander(np.linspace(np.log(50), np.log(400), 500), 2)
y = np.dot(a, np.random.multivariate_normal(mu, S, 50).T)
ax.plot(a[:, 0], y, "r", alpha=0.3)
ax.plot(a[:, 0], np.dot(a, mu), "--r", lw=2)
ax.errorbar(s[m, 0], cs, yerr=np.sqrt(cs_var), fmt="k", capsize=0)
return val, var, literature, fig, fig2, (q[1], np.diff(q))
def main(bp, real_data, ep_bins=False, ignore_uncert=False):
if ignore_uncert:
bp = "{0}-no-uncert".format(bp)
try:
os.makedirs(bp)
except os.error:
pass
# Load the censoring function.
censor = load_detection_efficiency()
# The values from EP's paper (+some made up numbers).
lpb, lrb = censor.bins
x, y = lpb[::4], lrb[::4]
# Load the candidates.
if real_data:
ids, catalog, err = load_candidates()
truth = None
m = np.log(catalog[:, 0]) > np.min(x)
catalog, err = catalog[m], err[m]
if ignore_uncert:
err = np.zeros_like(err)
else:
catalog, err, truth = \
pickle.load(open(os.path.join(bp, "catalog.pkl")))
K = 1 if ignore_uncert else 512
dataset = Dataset.sample(catalog, err, samples=K, censor=censor,
functions=[np.log, np.log])
print("{0} entries in catalog".format(dataset.catalogs.shape[1]))
    # Convert the radius uncertainties to asymmetric log-space error bars
    # (period uncertainties are ignored).
rerr = [np.log(catalog[:, 1]) - np.log(catalog[:, 1]-err[:, 1]),
np.log(catalog[:, 1]+err[:, 1]) - np.log(catalog[:, 1])]
err = [0, rerr]
# Build the binned model.
bins = [x, y]
print("Run inference on a grid with shape: {0}"
.format([len(b)-1 for b in bins]))
pop = Population(bins, censor.bins, 11.0)
model = ProbabilisticModel(dataset, pop, censor, [3.6, 2.6, 1.6, 0.0],
np.array([2.0, 0.5, 0.3, 0.3]) / 2.4)
# Do V-max.
val, var, literature, fig1, fig2, ext = \
inverse_detection_efficiency(pop, censor, catalog, err, truth)
open(os.path.join(bp, "extrap.txt"), "w").write(
"{0} -{1} +{2}".format(ext[0], *(ext[1])))
# Plot the vmax results.
labels = ["$\ln T/\mathrm{day}$", "$\ln R/R_\oplus$"]
top_axes = ["$T\,[\mathrm{days}]$", "$R\,[R_\oplus]$"]
fig1.savefig(os.path.join(bp, "vmax.png"))
fig1.savefig(os.path.join(bp, "vmax.pdf"))
# Plot the extrapolation.
fig2.savefig(os.path.join(bp, "extrapolation.png"))
fig2.savefig(os.path.join(bp, "extrapolation.pdf"))
# Save the model and the other things needed for plotting the results.
pickle.dump((model, catalog, err, truth, labels, top_axes,
literature),
open(os.path.join(bp, "model.pkl"), "w"), -1)
# Set up the output files.
nblock = 500
N, ndim, nhyper = 2000 * nblock, len(pop), 4
samples = np.empty((nblock, ndim))
hyper = np.empty((nblock, nhyper))
lnprob = np.empty(nblock)
fn = os.path.join(bp, "results.h5")
with h5py.File(fn, "w") as f:
f.create_dataset("samples", shape=(N, ndim), dtype=np.float64)
f.create_dataset("hyper", shape=(N, nhyper), dtype=np.float64)
f.create_dataset("lnprob", shape=(N,), dtype=np.float64)
for i, (th, hy, lp, acc) in enumerate(model.sample()):
n = i % nblock
samples[n, :] = th
hyper[n, :] = hy
lnprob[n] = lp
if n == nblock - 1:
print(i+1, (i+1) / N, np.max(lnprob), acc)
s = slice(i-n, i+1)
with h5py.File(fn, "a") as f:
f.attrs["iteration"] = i+1
f["samples"][s, :] = samples
f["hyper"][s, :] = hyper
f["lnprob"][s] = lnprob
if i >= N-1:
break
pl.clf()
pl.plot(samples[:, 0])
pl.savefig("test.png")
pl.clf()
pl.plot(lnprob)
pl.savefig("test-lp.png")
pl.clf()
pl.plot(hyper[:, 0])
pl.savefig("test-hyper.png")
if __name__ == "__main__":
import sys
ignore = False
if "--ignore" in sys.argv:
sys.argv.remove("--ignore")
ignore = True
if len(sys.argv) > 1:
main(sys.argv[1], False, ignore_uncert=ignore)
else:
main("results", True, ignore_uncert=ignore)
| mit |
ZENGXH/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
cython-testbed/pandas | pandas/io/json/json.py | 3 | 33520 | # pylint: disable-msg=E1101,W0613,W0603
from itertools import islice
import os
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslibs import iNaT
from pandas.compat import StringIO, long, u, to_str
from pandas import compat, isna
from pandas import Series, DataFrame, to_datetime, MultiIndex
from pandas.io.common import (get_filepath_or_buffer, _get_handle,
_infer_compression, _stringify_path,
BaseIterator)
from pandas.io.parsers import _validate_integer
import pandas.core.common as com
from pandas.core.reshape.concat import concat
from pandas.io.formats.printing import pprint_thing
from .normalize import _convert_to_line_delimits
from .table_schema import build_table_schema, parse_table_schema
from pandas.core.dtypes.common import is_period_dtype
loads = json.loads
dumps = json.dumps
TABLE_SCHEMA_VERSION = '0.20.0'
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression='infer',
index=True):
if not index and orient not in ['split', 'table']:
raise ValueError("'index=False' is only valid when 'orient' is "
"'split' or 'table'")
path_or_buf = _stringify_path(path_or_buf)
if lines and orient != 'records':
raise ValueError(
"'lines' keyword only valid when 'orient' is records")
if orient == 'table' and isinstance(obj, Series):
obj = obj.to_frame(name=obj.name or 'values')
if orient == 'table' and isinstance(obj, DataFrame):
writer = JSONTableWriter
elif isinstance(obj, Series):
writer = SeriesWriter
elif isinstance(obj, DataFrame):
writer = FrameWriter
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
s = writer(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler,
index=index).write()
if lines:
s = _convert_to_line_delimits(s)
if isinstance(path_or_buf, compat.string_types):
fh, handles = _get_handle(path_or_buf, 'w', compression=compression)
try:
fh.write(s)
finally:
fh.close()
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
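# Illustrative sketch (not part of the original module): what a call to the
# module-level ``to_json`` with ``lines=True`` produces. The frame contents
# and helper name are assumptions for demonstration only.
def _example_to_json_lines():
    df = DataFrame({"a": [1, 2], "b": ["x", "y"]})
    # lines=True requires orient='records'; with path_or_buf=None the
    # newline-delimited JSON string is returned instead of being written out
    return to_json(None, df, orient='records', lines=True)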
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, index, default_handler=None):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.index = index
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise com.AbstractMethodError(self)
def write(self):
return self._write(self.obj, self.orient, self.double_precision,
self.ensure_ascii, self.date_unit,
self.date_format == 'iso', self.default_handler)
def _write(self, obj, orient, double_precision, ensure_ascii,
date_unit, iso_dates, default_handler):
return dumps(
obj,
orient=orient,
double_precision=double_precision,
ensure_ascii=ensure_ascii,
date_unit=date_unit,
iso_dates=iso_dates,
default_handler=default_handler
)
class SeriesWriter(Writer):
_default_orient = 'index'
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'{orient}'".format(orient=self.orient))
def _write(self, obj, orient, double_precision, ensure_ascii,
date_unit, iso_dates, default_handler):
if not self.index and orient == 'split':
obj = {"name": obj.name, "data": obj.values}
return super(SeriesWriter, self)._write(obj, orient,
double_precision,
ensure_ascii, date_unit,
iso_dates, default_handler)
class FrameWriter(Writer):
_default_orient = 'columns'
def _format_axes(self):
""" try to axes if they are datelike """
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'{orient}'.".format(orient=self.orient))
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'{orient}'.".format(orient=self.orient))
def _write(self, obj, orient, double_precision, ensure_ascii,
date_unit, iso_dates, default_handler):
if not self.index and orient == 'split':
obj = obj.to_dict(orient='split')
del obj["index"]
return super(FrameWriter, self)._write(obj, orient,
double_precision,
ensure_ascii, date_unit,
iso_dates, default_handler)
class JSONTableWriter(FrameWriter):
_default_orient = 'records'
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, index, default_handler=None):
"""
Adds a `schema` attribute with the Table Schema, resets
the index (can't do in caller, because the schema inference needs
to know what the index is, forces orient to records, and forces
date_format to 'iso'.
"""
super(JSONTableWriter, self).__init__(
obj, orient, date_format, double_precision, ensure_ascii,
date_unit, index, default_handler=default_handler)
if date_format != 'iso':
msg = ("Trying to write with `orient='table'` and "
"`date_format='{fmt}'`. Table Schema requires dates "
"to be formatted with `date_format='iso'`"
.format(fmt=date_format))
raise ValueError(msg)
self.schema = build_table_schema(obj, index=self.index)
        # Not implemented on a column MultiIndex
if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
raise NotImplementedError(
"orient='table' is not supported for MultiIndex")
# TODO: Do this timedelta properly in objToJSON.c See GH #15137
if ((obj.ndim == 1) and (obj.name in set(obj.index.names)) or
len(obj.columns & obj.index.names)):
msg = "Overlapping names between the index and columns"
raise ValueError(msg)
obj = obj.copy()
timedeltas = obj.select_dtypes(include=['timedelta']).columns
if len(timedeltas):
obj[timedeltas] = obj[timedeltas].applymap(
lambda x: x.isoformat())
        # Convert PeriodIndex to datetimes before serializing
if is_period_dtype(obj.index):
obj.index = obj.index.to_timestamp()
# exclude index from obj if index=False
if not self.index:
self.obj = obj.reset_index(drop=True)
else:
self.obj = obj.reset_index(drop=False)
self.date_format = 'iso'
self.orient = 'records'
self.index = index
def _write(self, obj, orient, double_precision, ensure_ascii,
date_unit, iso_dates, default_handler):
data = super(JSONTableWriter, self)._write(obj, orient,
double_precision,
ensure_ascii, date_unit,
iso_dates,
default_handler)
serialized = '{{"schema": {schema}, "data": {data}}}'.format(
schema=dumps(self.schema), data=data)
return serialized
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
convert_axes=True, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None, encoding=None,
lines=False, chunksize=None, compression='infer'):
"""
Convert a JSON string to pandas object
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values', 'table'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
.. versionadded:: 0.23.0
'table' as an allowed value for the ``orient`` argument
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
If True, infer dtypes, if a dict of column to dtype, then use those,
if False, then don't infer dtypes at all, applies only to the data.
convert_axes : boolean, default True
Try to convert the axes to the proper dtypes.
convert_dates : boolean, default True
List of columns to parse for dates; If True, then try to parse
datelike columns default is True; a column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
lines : boolean, default False
Read the file as a json object per line.
.. versionadded:: 0.19.0
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
.. versionadded:: 0.19.0
chunksize: integer, default None
Return JsonReader object for iteration.
        See the `line-delimited json docs
<http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_
for more information on ``chunksize``.
This can only be passed if `lines=True`.
If this is None, the file will be read into memory all at once.
.. versionadded:: 0.21.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
gzip, bz2, zip or xz if path_or_buf is a string ending in
'.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
otherwise. If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
.. versionadded:: 0.21.0
Returns
-------
result : Series or DataFrame, depending on the value of `typ`.
Notes
-----
Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
:class:`Index` name of `index` gets written with :func:`to_json`, the
subsequent read operation will incorrectly set the :class:`Index` name to
``None``. This is because `index` is also used by :func:`DataFrame.to_json`
to denote a missing :class:`Index` name, and the subsequent
:func:`read_json` operation cannot distinguish between the two. The same
limitation is encountered with a :class:`MultiIndex` and any names
beginning with ``'level_'``.
See Also
--------
DataFrame.to_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a Dataframe using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
compression = _infer_compression(path_or_buf, compression)
filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression,
)
json_reader = JsonReader(
filepath_or_buffer, orient=orient, typ=typ, dtype=dtype,
convert_axes=convert_axes, convert_dates=convert_dates,
keep_default_dates=keep_default_dates, numpy=numpy,
precise_float=precise_float, date_unit=date_unit, encoding=encoding,
lines=lines, chunksize=chunksize, compression=compression,
)
if chunksize:
return json_reader
result = json_reader.read()
if should_close:
try:
filepath_or_buffer.close()
except: # noqa: flake8
pass
return result
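# Illustrative sketch (not part of the original module): reading
# newline-delimited JSON in chunks. The sample data and helper name are
# assumptions for demonstration only.
def _example_read_json_lines():
    data = '{"a": 1, "b": 2}\n{"a": 3, "b": 4}\n'
    # chunksize requires lines=True and returns a JsonReader; iterating it
    # yields one DataFrame per chunk
    reader = read_json(StringIO(data), lines=True, chunksize=1)
    return [chunk for chunk in reader]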
class JsonReader(BaseIterator):
"""
JsonReader provides an interface for reading in a JSON file.
If initialized with ``lines=True`` and ``chunksize``, can be iterated over
``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the
whole document.
"""
def __init__(self, filepath_or_buffer, orient, typ, dtype, convert_axes,
convert_dates, keep_default_dates, numpy, precise_float,
date_unit, encoding, lines, chunksize, compression):
self.path_or_buf = filepath_or_buffer
self.orient = orient
self.typ = typ
self.dtype = dtype
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.keep_default_dates = keep_default_dates
self.numpy = numpy
self.precise_float = precise_float
self.date_unit = date_unit
self.encoding = encoding
self.compression = compression
self.lines = lines
self.chunksize = chunksize
self.nrows_seen = 0
self.should_close = False
if self.chunksize is not None:
self.chunksize = _validate_integer("chunksize", self.chunksize, 1)
if not self.lines:
raise ValueError("chunksize can only be passed if lines=True")
data = self._get_data_from_filepath(filepath_or_buffer)
self.data = self._preprocess_data(data)
def _preprocess_data(self, data):
"""
At this point, the data either has a `read` attribute (e.g. a file
object or a StringIO) or is a string that is a JSON document.
If self.chunksize, we prepare the data for the `__next__` method.
Otherwise, we read it into memory for the `read` method.
"""
if hasattr(data, 'read') and not self.chunksize:
data = data.read()
if not hasattr(data, 'read') and self.chunksize:
data = StringIO(data)
return data
def _get_data_from_filepath(self, filepath_or_buffer):
"""
read_json accepts three input types:
1. filepath (string-like)
2. file-like object (e.g. open file object, StringIO)
3. JSON string
This method turns (1) into (2) to simplify the rest of the processing.
It returns input types (2) and (3) unchanged.
"""
data = filepath_or_buffer
exists = False
if isinstance(data, compat.string_types):
try:
exists = os.path.exists(filepath_or_buffer)
# gh-5874: if the filepath is too long will raise here
except (TypeError, ValueError):
pass
if exists or self.compression is not None:
data, _ = _get_handle(filepath_or_buffer, 'r',
encoding=self.encoding,
compression=self.compression)
self.should_close = True
self.open_stream = data
return data
def _combine_lines(self, lines):
"""Combines a list of JSON objects into one JSON object"""
lines = filter(None, map(lambda x: x.strip(), lines))
return '[' + ','.join(lines) + ']'
def read(self):
"""Read the whole JSON input into a pandas object"""
if self.lines and self.chunksize:
obj = concat(self)
elif self.lines:
data = to_str(self.data)
obj = self._get_object_parser(
self._combine_lines(data.split('\n'))
)
else:
obj = self._get_object_parser(self.data)
self.close()
return obj
def _get_object_parser(self, json):
"""parses a json document into a pandas object"""
typ = self.typ
dtype = self.dtype
kwargs = {
"orient": self.orient, "dtype": self.dtype,
"convert_axes": self.convert_axes,
"convert_dates": self.convert_dates,
"keep_default_dates": self.keep_default_dates, "numpy": self.numpy,
"precise_float": self.precise_float, "date_unit": self.date_unit
}
obj = None
if typ == 'frame':
obj = FrameParser(json, **kwargs).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
kwargs['dtype'] = dtype
obj = SeriesParser(json, **kwargs).parse()
return obj
def close(self):
"""
If we opened a stream earlier, in _get_data_from_filepath, we should
close it. If an open stream or file was passed, we leave it open.
"""
if self.should_close:
try:
self.open_stream.close()
except (IOError, AttributeError):
pass
def __next__(self):
lines = list(islice(self.data, self.chunksize))
if lines:
lines_json = self._combine_lines(lines)
obj = self._get_object_parser(lines_json)
# Make sure that the returned objects have the right index.
obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))
self.nrows_seen += len(obj)
return obj
self.close()
raise StopIteration
class Parser(object):
_STAMP_UNITS = ('s', 'ms', 'us', 'ns')
_MIN_STAMPS = {
's': long(31536000),
'ms': long(31536000000),
'us': long(31536000000000),
'ns': long(31536000000000000)}
def __init__(self, json, orient, dtype=True, convert_axes=True,
convert_dates=True, keep_default_dates=False, numpy=False,
precise_float=False, date_unit=None):
self.json = json
if orient is None:
orient = self._default_orient
self.orient = orient
self.dtype = dtype
if orient == "split":
numpy = False
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError('date_unit must be one of {units}'
.format(units=self._STAMP_UNITS))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
self.numpy = numpy
self.precise_float = precise_float
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.date_unit = date_unit
self.keep_default_dates = keep_default_dates
self.obj = None
def check_keys_split(self, decoded):
"checks that dict has only the appropriate keys for orient='split'"
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError(u("JSON data had unexpected key(s): {bad_keys}")
.format(bad_keys=pprint_thing(bad_keys)))
def parse(self):
# try numpy
numpy = self.numpy
if numpy:
self._parse_numpy()
else:
self._parse_no_numpy()
if self.obj is None:
return None
if self.convert_axes:
self._convert_axes()
self._try_convert_types()
return self.obj
def _convert_axes(self):
""" try to convert axes """
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
raise com.AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
""" try to parse a ndarray like into a column by inferring dtype """
# don't try to coerce, unless a force conversion
if use_dtypes:
if self.dtype is False:
return data, False
elif self.dtype is True:
pass
else:
# dtype to force
dtype = (self.dtype.get(name)
if isinstance(self.dtype, dict) else self.dtype)
if dtype is not None:
try:
dtype = np.dtype(dtype)
return data.astype(dtype), True
except (TypeError, ValueError):
return data, False
if convert_dates:
new_data, result = self._try_convert_to_date(data)
if result:
return new_data, True
result = False
if data.dtype == 'object':
# try float
try:
data = data.astype('float64')
result = True
except (TypeError, ValueError):
pass
if data.dtype.kind == 'f':
if data.dtype != 'float64':
# coerce floats to 64
try:
data = data.astype('float64')
result = True
except (TypeError, ValueError):
pass
        # don't coerce 0-len data
if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
# coerce ints if we can
try:
new_data = data.astype('int64')
if (new_data == data).all():
data = new_data
result = True
except (TypeError, ValueError):
pass
# coerce ints to 64
if data.dtype == 'int':
            # coerce ints to 64
try:
data = data.astype('int64')
result = True
except (TypeError, ValueError):
pass
return data, result
def _try_convert_to_date(self, data):
""" try to parse a ndarray like into a date column
try to coerce object in epoch/iso formats and
integer/float in epcoh formats, return a boolean if parsing
was successful """
# no conversion on empty
if not len(data):
return data, False
new_data = data
if new_data.dtype == 'object':
try:
new_data = data.astype('int64')
except (TypeError, ValueError, OverflowError):
pass
# ignore numbers that are out of range
if issubclass(new_data.dtype.type, np.number):
in_range = (isna(new_data.values) | (new_data > self.min_stamp) |
(new_data.values == iNaT))
if not in_range.all():
return data, False
date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors='raise',
unit=date_unit)
except ValueError:
continue
except Exception:
break
return new_data, True
return data, False
def _try_convert_dates(self):
raise com.AbstractMethodError(self)
class SeriesParser(Parser):
_default_orient = 'index'
_split_keys = ('name', 'index', 'data')
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = {str(k): v for k, v in compat.iteritems(
loads(json, precise_float=self.precise_float))}
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
self.obj = Series(
loads(json, precise_float=self.precise_float), dtype=None)
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = {str(k): v for k, v in compat.iteritems(decoded)}
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
self.obj = Series(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
else:
self.obj = Series(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
def _try_convert_types(self):
if self.obj is None:
return
obj, result = self._try_convert_data(
'data', self.obj, convert_dates=self.convert_dates)
if result:
self.obj = obj
class FrameParser(Parser):
_default_orient = 'columns'
_split_keys = ('columns', 'index', 'data')
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
args = loads(json, dtype=None, numpy=True, labelled=True,
precise_float=self.precise_float)
if len(args):
args = (args[0].T, args[2], args[1])
self.obj = DataFrame(*args)
elif orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = {str(k): v for k, v in compat.iteritems(decoded)}
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
self.obj = DataFrame(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
else:
self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
elif orient == "split":
decoded = {str(k): v for k, v in compat.iteritems(
loads(json, precise_float=self.precise_float))}
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None).T
elif orient == 'table':
self.obj = parse_table_schema(json,
precise_float=self.precise_float)
else:
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
def _process_converter(self, f, filt=None):
""" take a conversion function and possibly recreate the frame """
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.iteritems()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
def _try_convert_types(self):
if self.obj is None:
return
if self.convert_dates:
self._try_convert_dates()
self._process_converter(
lambda col, c: self._try_convert_data(col, c, convert_dates=False))
def _try_convert_dates(self):
if self.obj is None:
return
# our columns to parse
convert_dates = self.convert_dates
if convert_dates is True:
convert_dates = []
convert_dates = set(convert_dates)
def is_ok(col):
""" return if this col is ok to try for a date parse """
if not isinstance(col, compat.string_types):
return False
col_lower = col.lower()
if (col_lower.endswith('_at') or
col_lower.endswith('_time') or
col_lower == 'modified' or
col_lower == 'date' or
col_lower == 'datetime' or
col_lower.startswith('timestamp')):
return True
return False
self._process_converter(
lambda col, c: self._try_convert_to_date(c),
lambda col, c: ((self.keep_default_dates and is_ok(col)) or
col in convert_dates))
| bsd-3-clause |
fdudatamining/framework | framework/process/__init__.py | 1 | 3836 | import pandas as pd
import numpy as np
import itertools as it
from sklearn import \
feature_selection, \
model_selection, \
linear_model, \
neighbors, \
cluster
from .tunable import *
def outlier_detector(X, y,
selector=feature_selection.SelectKBest,
cv=model_selection.StratifiedKFold,
regressor=linear_model.LinearRegression,
distance=lambda d: neighbors.DistanceMetric.get_metric("mahalanobis", V=np.cov(d)),
threshold=3.0):
'''
  1. selector returns the most relevant features in the data
  2. cross validator splits the data into folds and trains the regressor on them
  3. Errors are calculated with the mahalanobis distance metric
  4. Rows whose error z-score exceeds the threshold are returned
'''
  selector = selector(k='all')  # alternatively e.g. k=min(10, X.shape[1]) to cap the feature count
  relevant_data = selector.fit_transform(X, y)
  targets = np.asarray(y)
  cv = cv()
  regressor = regressor()
  # out-of-fold predictions form the "mean value line" each sample is compared against
  mean_value_line = model_selection.cross_val_predict(regressor, relevant_data, targets, cv=cv)
  # mahalanobis metric built from the covariance of (target, prediction) pairs
  pairs = np.column_stack([targets, mean_value_line])
  metric = distance(pairs.T)
  # error: distance of each (target, prediction) pair from its perfectly predicted counterpart
  ideal = np.column_stack([targets, targets])
  error = np.array([metric.pairwise(pairs[i:i + 1], ideal[i:i + 1])[0, 0]
                    for i in range(len(pairs))])
  return X[(error - error.mean()) / error.std() > threshold]
def iterative_kmeans(X, z=-1, n_clusters=8, **kwargs):
'''
  Process X with KMeans, yielding rows that appear in clusters whose
  size is < mean(cluster_size) + z * std(cluster_size),
  and recursively processing the remaining clusters.
Parameters
----------
X : np.array | pd.DataFrame
The numpy array or pandas dataframe you wish to process
with iterative k-means.
z : float
The z-index cutoff for determining outlier vs inlier.
    (default -1)
n_clusters: int
The number of clusters to use each iteration. Different
numbers of clusters will require z-value tweaking.
**kwargs
Extra arguments passed to `sklearn.cluster.KMeans`.
Yields
------
  (subset_index, outlier_index)
    pandas Index selectors for the rows examined in this iteration and the
    rows flagged as outliers, respectively
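
  Examples
  --------
  A minimal sketch on random data (shapes and ``n_clusters`` are illustrative):

  >>> import numpy as np, pandas as pd
  >>> X = pd.DataFrame(np.random.rand(200, 3))
  >>> for subset_idx, outlier_idx in iterative_kmeans(X, n_clusters=4):
  ...     print(len(outlier_idx))  # doctest: +SKIP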
'''
X = pd.DataFrame(X)
while X.shape[0] > n_clusters:
km = cluster.KMeans(n_clusters=n_clusters, **kwargs)
x = km.fit_predict(X)
c = pd.value_counts(x).sort_values()
thresh = c.mean() + z*c.std()
O = X[np.in1d(x, c[c < thresh].index)]
yield(X.index, O.index)
if O.shape[0] != 0:
X = X[np.in1d(x, c[c >= thresh].index)]
else:
break
def aggregate_bins1d(x=None, n=10, aggfunc='count', fillna=np.NaN):
''' An easier to use histogram function '''
c = pd.cut(x, n)
g = pd.Series(x).groupby(c)
return g.agg(aggfunc)
def aggregate_bins(df=None, x=None, y=None, z=None, n=10, aggfunc=None, fillna=np.NaN):
  ''' 2d/3d binning view that lays out x and y binned in 2 dimensions, with either the count in each
  bin, or a custom z field aggregated with a custom `aggfunc`, shown as a color in the z direction.
To plot use:
df = aggregate_bins(...)
draw(kind='heatmap', df=df, clabel='Count', show=True)
'''
if type(n) == int:
# Support different x and y binning, if an iterable
# isn't passed, we turn it into an iterable.
n = [n, n]
if z is None:
# Yes it's hacky, I know. This is required when a count is expected and z isn't
# necessary.
if aggfunc is None:
aggfunc = 'count'
z = '_'
df = df[[x, y]]
df[z] = 1
elif aggfunc is None:
aggfunc = 'mean'
gx, gy = [pd.cut(df[g], c) for g, c in zip([x,y], n)]
# right edges of bins for ticks,
# note that pandas uses strings to represent the cuts so we need to parse those
g = df.groupby([gx, gy])
return g[z].agg(aggfunc).reset_index() \
.pivot_table(index=[y], columns=[x], values=[z]) \
.fillna(fillna)
def polyfit(*kargs):
''' A simple wrapper for np.polyfit that returns a more useful object '''
return np.poly1d(
np.polyfit(
*kargs
))
| gpl-2.0 |
slundberg/shap | tests/explainers/test_kernel.py | 1 | 9132 | import numpy as np
import scipy as sp
import pandas as pd
import sklearn
import shap
def test_null_model_small():
""" Test a small null model.
"""
explainer = shap.KernelExplainer(lambda x: np.zeros(x.shape[0]), np.ones((2, 4)), nsamples=100)
e = explainer.explain(np.ones((1, 4)))
assert np.sum(np.abs(e)) < 1e-8
def test_null_model():
""" Test a larger null model.
"""
explainer = shap.KernelExplainer(lambda x: np.zeros(x.shape[0]), np.ones((2, 10)), nsamples=100)
e = explainer.explain(np.ones((1, 10)))
assert np.sum(np.abs(e)) < 1e-8
def test_front_page_model_agnostic():
""" Test the ReadMe kernel expainer example.
"""
# print the JS visualization code to the notebook
shap.initjs()
# train a SVM classifier
X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.iris(), test_size=0.1, random_state=0)
svm = sklearn.svm.SVC(kernel='rbf', probability=True)
svm.fit(X_train, Y_train)
# use Kernel SHAP to explain test set predictions
explainer = shap.KernelExplainer(svm.predict_proba, X_train, nsamples=100, link="logit")
shap_values = explainer.shap_values(X_test)
# plot the SHAP values for the Setosa output of the first instance
shap.force_plot(explainer.expected_value[0], shap_values[0][0, :], X_test.iloc[0, :], link="logit")
def test_front_page_model_agnostic_rank():
""" Test the rank regularized explanation of the ReadMe example.
"""
# print the JS visualization code to the notebook
shap.initjs()
# train a SVM classifier
X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.iris(), test_size=0.1, random_state=0)
svm = sklearn.svm.SVC(kernel='rbf', probability=True)
svm.fit(X_train, Y_train)
# use Kernel SHAP to explain test set predictions
explainer = shap.KernelExplainer(svm.predict_proba, X_train, nsamples=100, link="logit", l1_reg="rank(3)")
shap_values = explainer.shap_values(X_test)
# plot the SHAP values for the Setosa output of the first instance
shap.force_plot(explainer.expected_value[0], shap_values[0][0, :], X_test.iloc[0, :], link="logit")
def test_kernel_shap_with_dataframe():
""" Test with a Pandas DataFrame.
"""
np.random.seed(3)
df_X = pd.DataFrame(np.random.random((10, 3)), columns=list('abc'))
df_X.index = pd.date_range('2018-01-01', periods=10, freq='D', tz='UTC')
df_y = df_X.eval('a - 2 * b + 3 * c')
df_y = df_y + np.random.normal(0.0, 0.1, df_y.shape)
linear_model = sklearn.linear_model.LinearRegression()
linear_model.fit(df_X, df_y)
explainer = shap.KernelExplainer(linear_model.predict, df_X, keep_index=True)
_ = explainer.shap_values(df_X)
def test_kernel_shap_with_a1a_sparse_zero_background():
""" Test with a sparse matrix for the background.
"""
X, y = shap.datasets.a1a() # pylint: disable=unbalanced-tuple-unpacking
x_train, x_test, y_train, _ = sklearn.model_selection.train_test_split(X, y, test_size=0.01, random_state=0)
linear_model = sklearn.linear_model.LinearRegression()
linear_model.fit(x_train, y_train)
_, cols = x_train.shape
shape = 1, cols
background = sp.sparse.csr_matrix(shape, dtype=x_train.dtype)
explainer = shap.KernelExplainer(linear_model.predict, background)
explainer.shap_values(x_test)
def test_kernel_shap_with_a1a_sparse_nonzero_background():
""" Check with a sparse non zero background matrix.
"""
np.set_printoptions(threshold=100000)
np.random.seed(0)
X, y = shap.datasets.a1a() # pylint: disable=unbalanced-tuple-unpacking
x_train, x_test, y_train, _ = sklearn.model_selection.train_test_split(X, y, test_size=0.01, random_state=0)
linear_model = sklearn.linear_model.LinearRegression()
linear_model.fit(x_train, y_train)
# Calculate median of background data
median_dense = sklearn.utils.sparsefuncs.csc_median_axis_0(x_train.tocsc())
median = sp.sparse.csr_matrix(median_dense)
explainer = shap.KernelExplainer(linear_model.predict, median)
shap_values = explainer.shap_values(x_test)
def dense_to_sparse_predict(data):
sparse_data = sp.sparse.csr_matrix(data)
return linear_model.predict(sparse_data)
explainer_dense = shap.KernelExplainer(dense_to_sparse_predict, median_dense.reshape((1, len(median_dense))))
x_test_dense = x_test.toarray()
shap_values_dense = explainer_dense.shap_values(x_test_dense)
# Validate sparse and dense result is the same
assert np.allclose(shap_values, shap_values_dense, rtol=1e-02, atol=1e-01)
def test_kernel_shap_with_high_dim_sparse():
""" Verifies we can run on very sparse data produced from feature hashing.
"""
remove = ('headers', 'footers', 'quotes')
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
ngroups = sklearn.datasets.fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42, remove=remove)
x_train, x_test, y_train, _ = sklearn.model_selection.train_test_split(ngroups.data, ngroups.target, test_size=0.01, random_state=42)
vectorizer = sklearn.feature_extraction.text.HashingVectorizer(stop_words='english', alternate_sign=False, n_features=2**16)
x_train = vectorizer.transform(x_train)
x_test = vectorizer.transform(x_test)
# Fit a linear regression model
linear_model = sklearn.linear_model.LinearRegression()
linear_model.fit(x_train, y_train)
_, cols = x_train.shape
shape = 1, cols
background = sp.sparse.csr_matrix(shape, dtype=x_train.dtype)
explainer = shap.KernelExplainer(linear_model.predict, background)
_ = explainer.shap_values(x_test)
def test_kernel_sparse_vs_dense_multirow_background():
""" Mix sparse and dense matrix values.
"""
# train a logistic regression classifier
X_train, X_test, Y_train, _ = sklearn.model_selection.train_test_split(*shap.datasets.iris(), test_size=0.1, random_state=0)
lr = sklearn.linear_model.LogisticRegression(solver='lbfgs')
lr.fit(X_train, Y_train)
# use Kernel SHAP to explain test set predictions with dense data
explainer = shap.KernelExplainer(lr.predict_proba, X_train, nsamples=100, link="logit", l1_reg="rank(3)")
shap_values = explainer.shap_values(X_test)
X_sparse_train = sp.sparse.csr_matrix(X_train)
X_sparse_test = sp.sparse.csr_matrix(X_test)
lr_sparse = sklearn.linear_model.LogisticRegression(solver='lbfgs')
lr_sparse.fit(X_sparse_train, Y_train)
# use Kernel SHAP again but with sparse data
sparse_explainer = shap.KernelExplainer(lr.predict_proba, X_sparse_train, nsamples=100, link="logit", l1_reg="rank(3)")
sparse_shap_values = sparse_explainer.shap_values(X_sparse_test)
assert np.allclose(shap_values, sparse_shap_values, rtol=1e-05, atol=1e-05)
# Use sparse evaluation examples with dense background
sparse_sv_dense_bg = explainer.shap_values(X_sparse_test)
assert np.allclose(shap_values, sparse_sv_dense_bg, rtol=1e-05, atol=1e-05)
def test_linear():
""" Tests that KernelExplainer returns the correct result when the model is linear.
(as per corollary 1 of https://arxiv.org/abs/1705.07874)
"""
np.random.seed(2)
x = np.random.normal(size=(200, 3), scale=1)
# a linear model
def f(x):
return x[:, 0] + 2.0*x[:, 1]
phi = shap.KernelExplainer(f, x).shap_values(x, l1_reg="num_features(2)", silent=True)
assert phi.shape == x.shape
# corollary 1
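    # (corollary 1: for a linear model f(x) = w.x with independent features,
    #  phi_j = w_j * (x_j - E[x_j]); here w = [1, 2, 0], which the next line encodes)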
expected = (x - x.mean(0)) * np.array([1.0, 2.0, 0.0])
np.testing.assert_allclose(expected, phi, rtol=1e-3)
def test_non_numeric():
""" Test using non-numeric data.
"""
# create dummy data
X = np.array([['A', '0', '0'], ['A', '1', '0'], ['B', '0', '0'], ['B', '1', '0'], ['A', '1', '0']])
y = np.array([0, 1, 2, 3, 4])
# build and train the pipeline
pipeline = sklearn.pipeline.Pipeline([
('oneHotEncoder', sklearn.preprocessing.OneHotEncoder()),
('linear', sklearn.linear_model.LinearRegression())
])
pipeline.fit(X, y)
# use KernelExplainer
explainer = shap.KernelExplainer(pipeline.predict, X, nsamples=100)
shap_values = explainer.explain(X[0, :].reshape(1, -1))
assert np.abs(explainer.expected_value + shap_values.sum(0) - pipeline.predict(X[0, :].reshape(1, -1))[0]) < 1e-4
assert shap_values[2] == 0
# tests for shap.KernelExplainer.not_equal
assert shap.KernelExplainer.not_equal(0, 0) == shap.KernelExplainer.not_equal('0', '0')
assert shap.KernelExplainer.not_equal(0, 1) == shap.KernelExplainer.not_equal('0', '1')
assert shap.KernelExplainer.not_equal(0, np.nan) == shap.KernelExplainer.not_equal('0', np.nan)
assert shap.KernelExplainer.not_equal(0, np.nan) == shap.KernelExplainer.not_equal('0', None)
assert shap.KernelExplainer.not_equal(np.nan, 0) == shap.KernelExplainer.not_equal(np.nan, '0')
assert shap.KernelExplainer.not_equal(np.nan, 0) == shap.KernelExplainer.not_equal(None, '0')
| mit |
JsNoNo/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | sklearn/decomposition/pca.py | 192 | 23117 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd
from ..utils.validation import check_is_fitted
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in gaussian noise of shape(n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by n_samples times singular values to ensure uncorrelated outputs
with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
n_components_ : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
        compute the estimated data covariance and score samples.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = check_array(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
explained_variance_ = (S ** 2) / n_samples
explained_variance_ratio_ = (explained_variance_ /
explained_variance_.sum())
components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(explained_variance_,
n_samples, n_features)
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r invalid for n_features=%d"
% (n_components, n_features))
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < n_features:
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
# store n_samples to revert whitening when getting covariance
self.n_samples_ = n_samples
self.components_ = components_[:n_components]
self.explained_variance_ = explained_variance_[:n_components]
explained_variance_ratio_ = explained_variance_ratio_[:n_components]
self.explained_variance_ratio_ = explained_variance_ratio_
self.n_components_ = n_components
return (U, S, V)
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
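        # Sketch of the identity used below (W = components_, lam = explained_variance_,
        # s2 = noise_variance_; the whitening rescaling is not re-applied here):
        #   C = W.T @ diag(lam - s2) @ W + s2 * I
        #   C^-1 = I / s2 - W.T @ inv(diag(1 / (lam - s2)) + W @ W.T / s2) @ W / s2**2
        # so only an (n_components, n_components) matrix has to be inverted.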
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
def transform(self, X):
"""Apply the dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mean_')
if self.whiten:
return fast_dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
def score_samples(self, X):
"""Return the log-likelihood of each sample
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi)
- fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
Mendeley/mrec | mrec/tests/test_sparse.py | 3 | 2273 | import tempfile
import os
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from mrec.testing import get_random_coo_matrix
from mrec.testing import assert_sparse_matrix_equal
from mrec.sparse import loadtxt
from mrec.sparse import savez
from mrec.sparse import loadz
from mrec.sparse import fast_sparse_matrix
def test_loadtxt():
X = get_random_coo_matrix()
f,path = tempfile.mkstemp(suffix='.npz')
with open(path,'w') as f:
for i,j,v in zip(X.row,X.col,X.data):
print >>f,'{0}\t{1}\t{2}'.format(i+1,j+1,v)
Y = loadtxt(path)
os.remove(path)
assert_sparse_matrix_equal(X,Y)
def test_savez_loadz():
m = get_random_coo_matrix()
f,path = tempfile.mkstemp(suffix='.npz')
savez(m,path)
n = loadz(path)
os.remove(path)
assert_array_equal(n.toarray(),m.toarray())
def test_init_fast_sparse_matrix():
X = get_random_coo_matrix()
Y = X.tocsr()
Z = X.tocsc()
for M in [X,Y,Z]:
m = fast_sparse_matrix(M)
assert_array_equal(m.X.toarray(),M.toarray())
assert_equal(m.shape,M.shape)
def test_fast_get_col():
X = get_random_coo_matrix().tocsc()
m = fast_sparse_matrix(X)
rows,cols = X.shape
for j in xrange(cols):
assert_array_equal(m.fast_get_col(j).toarray(),X[:,j].toarray())
def test_fast_update_col():
X = get_random_coo_matrix().tocsc()
m = fast_sparse_matrix(X)
cols = X.shape[1]
for j in xrange(cols):
vals = m.fast_get_col(j).data
if (vals==0).all():
continue
vals[vals!=0] += 1
m.fast_update_col(j,vals)
expected = X[:,j].toarray()
for i in xrange(expected.shape[0]):
if expected[i] != 0:
expected[i] += 1
assert_array_equal(m.fast_get_col(j).toarray(),expected)
def test_save_load():
"""Save to file as arrays in numpy binary format."""
X = get_random_coo_matrix()
m = fast_sparse_matrix(X)
f,path = tempfile.mkstemp(suffix='.npz')
m.save(path)
n = fast_sparse_matrix.load(path)
os.remove(path)
assert_equal(m.shape,n.shape)
assert_array_equal(m.X.toarray(),n.X.toarray())
assert_array_equal(m.col_view.toarray(),n.col_view.toarray())
| bsd-3-clause |
AntonSax/plantcv | plantcv/learn/naive_bayes.py | 1 | 8920 | # Naive Bayes
import os
import cv2
import numpy as np
from scipy import stats
def naive_bayes(imgdir, maskdir, outfile, mkplots=False):
"""Naive Bayes training function
Inputs:
imgdir = Path to a directory of original 8-bit RGB images.
maskdir = Path to a directory of binary mask images. Mask images must have the same name as their corresponding
color images.
outfile = Name of the output text file that will store the color channel probability density functions.
mkplots = Make PDF plots (True or False).
:param imgdir: str
:param maskdir: str
:param outfile: str
:param mkplots: bool
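
    Example (illustrative paths only; any directory of RGB images plus matching binary masks works):
    naive_bayes(imgdir="./rgb_images", maskdir="./binary_masks",
                outfile="naive_bayes_pdfs.txt", mkplots=True)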
"""
# Initialize color channel ndarrays for plant (foreground) and background
plant = {"hue": np.array([], dtype=np.uint8), "saturation": np.array([], dtype=np.uint8),
"value": np.array([], dtype=np.uint8)}
background = {"hue": np.array([], dtype=np.uint8), "saturation": np.array([], dtype=np.uint8),
"value": np.array([], dtype=np.uint8)}
# Walk through the image directory
print("Reading images...")
for (dirpath, dirnames, filenames) in os.walk(imgdir):
for filename in filenames:
# Is this an image type we can work with?
if filename[-3:] in ['png', 'jpg', 'jpeg']:
# Does the mask exist?
if os.path.exists(os.path.join(maskdir, filename)):
# Read the image as BGR
img = cv2.imread(os.path.join(dirpath, filename), 1)
# Read the mask as grayscale
mask = cv2.imread(os.path.join(maskdir, filename), 0)
# Convert the image to HSV and split into component channels
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hue, saturation, value = cv2.split(hsv)
# Store channels in a dictionary
channels = {"hue": hue, "saturation": saturation, "value": value}
# Split channels into plant and non-plant signal
for channel in channels.keys():
fg, bg = _split_plant_background_signal(channels[channel], mask)
# Randomly sample from the plant class (sample 10% of the pixels)
                        fg = fg[np.random.random_integers(0, len(fg) - 1, len(fg) // 10)]
# Randomly sample from the background class the same n as the plant class
bg = bg[np.random.random_integers(0, len(bg) - 1, len(fg))]
plant[channel] = np.append(plant[channel], fg)
background[channel] = np.append(background[channel], bg)
# Calculate a probability density function for each channel using a Gaussian kernel density estimator
# Create an output file for the PDFs
out = open(outfile, "w")
out.write("class\tchannel\t" + "\t".join(map(str, range(0, 256))) + "\n")
for channel in plant.keys():
print("Calculating PDF for the " + channel + " channel...")
plant_kde = stats.gaussian_kde(plant[channel])
bg_kde = stats.gaussian_kde(background[channel])
# Calculate p from the PDFs for each 8-bit intensity value and save to outfile
plant_pdf = plant_kde(range(0, 256))
out.write("plant\t" + channel + "\t" + "\t".join(map(str, plant_pdf)) + "\n")
bg_pdf = bg_kde(range(0, 256))
out.write("background\t" + channel + "\t" + "\t".join(map(str, bg_pdf)) + "\n")
if mkplots:
# If mkplots is True, make the PDF charts
_plot_pdf(channel, os.path.dirname(outfile), plant=plant_pdf, background=bg_pdf)
out.close()
def naive_bayes_multiclass(samples_file, outfile, mkplots=False):
"""Naive Bayes training function for two or more classes from sampled pixel RGB values
Inputs:
samples_file = Input text file containing sampled pixel RGB values for each training class. The file should be a
tab-delimited table with one training class per column. The required first row must contain header
labels for each class. The row values for each class must be comma-delimited RGB values. See the
file plantcv/tests/data/sampled_rgb_points.txt for an example.
outfile = Name of the output text file that will store the color channel probability density functions.
mkplots = Make PDF plots (True or False).
:param samples_file: str
:param outfile: str
:param mkplots: bool
"""
# Initialize a dictionary to store sampled RGB pixel values for each input class
sample_points = {}
# Open the sampled points text file
f = open(samples_file, "r")
# Read the first line and use the column headers as class labels
header = f.readline()
header = header.rstrip("\n")
class_list = header.split("\t")
# Initialize a dictionary for the red, green, and blue channels for each class
for cls in class_list:
sample_points[cls] = {"red": [], "green": [], "blue": []}
# Loop over the rest of the data in the input file
for row in f:
# Remove newlines and quotes
row = row.rstrip("\n")
row = row.replace('"', '')
# If this is not a blank line, parse the data
if len(row) > 0:
# Split the row into a list of points per class
points = row.split("\t")
# For each point per class
for i, point in enumerate(points):
# Split the point into red, green, and blue integer values
red, green, blue = map(int, point.split(","))
# Append each intensity value into the appropriate class list
sample_points[class_list[i]]["red"].append(red)
sample_points[class_list[i]]["green"].append(green)
sample_points[class_list[i]]["blue"].append(blue)
f.close()
# Initialize a dictionary to store probability density functions per color channel in HSV colorspace
pdfs = {"hue": {}, "saturation": {}, "value": {}}
# For each class
for cls in class_list:
# Create a blue, green, red-formatted image ndarray with the class RGB values
bgr_img = cv2.merge((np.asarray(sample_points[cls]["blue"], dtype=np.uint8),
np.asarray(sample_points[cls]["green"], dtype=np.uint8),
np.asarray(sample_points[cls]["red"], dtype=np.uint8)))
# Convert the BGR ndarray to an HSV ndarray
hsv_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)
# Split the HSV ndarray into the component HSV channels
hue, saturation, value = cv2.split(hsv_img)
# Create an HSV channel dictionary that stores the channels as lists (horizontally stacked ndarrays)
channels = {"hue": np.hstack(hue), "saturation": np.hstack(saturation), "value": np.hstack(value)}
# For each channel
for channel in channels.keys():
            # Create a kernel density estimator for the channel values (Gaussian kernel)
kde = stats.gaussian_kde(channels[channel])
# Use the KDE to calculate a probability density function for the channel
# Sample at each of the possible 8-bit values
pdfs[channel][cls] = kde(range(0, 256))
if mkplots:
# If mkplots is True, generate a density curve plot per channel for each class
for channel, cls in pdfs.items():
_plot_pdf(channel, os.path.dirname(outfile), **cls)
# Write the PDFs to a text file
out = open(outfile, "w")
# Write the column labels
out.write("class\tchannel\t" + "\t".join(map(str, range(0, 256))) + "\n")
# For each channel
for channel, cls in pdfs.items():
# For each class
for class_name, pdf in cls.items():
# Each row is the PDF for the given class and color channel
out.write(class_name + "\t" + channel + "\t" + "\t".join(map(str, pdf)) + "\n")
def _split_plant_background_signal(channel, mask):
"""Split a single-channel image by foreground and background using a mask
:param channel: ndarray
:param mask: ndarray
:return plant: ndarray
:return background: ndarray
"""
plant = channel[np.where(mask == 255)]
background = channel[np.where(mask == 0)]
return plant, background
def _plot_pdf(channel, outdir, **kwargs):
"""Plot the probability density function of one or more classes for the given channel
:param channel: str
:param outdir: str
:param kwargs: dict
"""
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
for class_name, pdf in kwargs.items():
plt.plot(pdf, label=class_name)
plt.legend(loc="best")
plt.savefig(os.path.join(outdir, str(channel) + "_pdf.png"))
plt.close()
| mit |
magne-max/zipline-ja | zipline/utils/security_list.py | 14 | 4625 | from datetime import datetime
from os import listdir
import os.path
import pandas as pd
import pytz
import zipline
from zipline.errors import SymbolNotFound
DATE_FORMAT = "%Y%m%d"
zipline_dir = os.path.dirname(zipline.__file__)
SECURITY_LISTS_DIR = os.path.join(zipline_dir, 'resources', 'security_lists')
class SecurityList(object):
def __init__(self, data, current_date_func, asset_finder):
"""
data: a nested dictionary:
knowledge_date -> lookup_date ->
                  {'add': [symbol list], 'delete': [symbol list]}
current_date_func: function taking no parameters, returning
current datetime
"""
self.data = data
self._cache = {}
self._knowledge_dates = self.make_knowledge_dates(self.data)
self.current_date = current_date_func
self.count = 0
self._current_set = set()
self.asset_finder = asset_finder
def make_knowledge_dates(self, data):
knowledge_dates = sorted(
[pd.Timestamp(k) for k in data.keys()])
return knowledge_dates
def __iter__(self):
return iter(self.restricted_list)
def __contains__(self, item):
return item in self.restricted_list
@property
def restricted_list(self):
cd = self.current_date()
for kd in self._knowledge_dates:
if cd < kd:
break
if kd in self._cache:
self._current_set = self._cache[kd]
continue
for effective_date, changes in iter(self.data[kd].items()):
self.update_current(
effective_date,
changes['add'],
self._current_set.add
)
self.update_current(
effective_date,
changes['delete'],
self._current_set.remove
)
self._cache[kd] = self._current_set
return self._current_set
def update_current(self, effective_date, symbols, change_func):
for symbol in symbols:
try:
asset = self.asset_finder.lookup_symbol(
symbol,
as_of_date=effective_date
)
# Pass if no Asset exists for the symbol
except SymbolNotFound:
continue
change_func(asset.sid)
class SecurityListSet(object):
# provide a cut point to substitute other security
# list implementations.
security_list_type = SecurityList
def __init__(self, current_date_func, asset_finder):
self.current_date_func = current_date_func
self.asset_finder = asset_finder
self._leveraged_etf = None
@property
def leveraged_etf_list(self):
if self._leveraged_etf is None:
self._leveraged_etf = self.security_list_type(
load_from_directory('leveraged_etf_list'),
self.current_date_func,
asset_finder=self.asset_finder
)
return self._leveraged_etf
def load_from_directory(list_name):
"""
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
new symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
{add: [symbol list], 'delete': [symbol list]}
"""
data = {}
dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)
for kd_name in listdir(dir_path):
kd = datetime.strptime(kd_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd] = {}
kd_path = os.path.join(dir_path, kd_name)
for ld_name in listdir(kd_path):
ld = datetime.strptime(ld_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd][ld] = {}
ld_path = os.path.join(kd_path, ld_name)
for fname in listdir(ld_path):
fpath = os.path.join(ld_path, fname)
with open(fpath) as f:
symbols = f.read().splitlines()
data[kd][ld][fname] = symbols
return data
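# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A hand-built instance of the nested mapping documented above, for a single
# hypothetical knowledge date. The dates and ticker symbols are made up;
# SecurityList.update_current expects 'add'/'delete' keys at the innermost
# level, mirroring the per-lookup-date files on disk.
def _example_security_list_data():
    kd = datetime(2015, 1, 5, tzinfo=pytz.utc)   # when the change became known
    ld = datetime(2015, 1, 6, tzinfo=pytz.utc)   # when the change takes effect
    return {kd: {ld: {'add': ['AAAA', 'BBBB'], 'delete': []}}}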
| apache-2.0 |
wlamond/scikit-learn | sklearn/ensemble/partial_dependence.py | 33 | 15265 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..utils.validation import check_is_fitted
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
            # feature has low resolution, use its unique values
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
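# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A tiny, self-contained check of the grid construction above; the toy matrix
# is made up. Because each column has only 4 unique values (fewer than
# grid_resolution), both axes fall back to the unique values and the grid is
# their cartesian product.
def _example_grid_from_X():
    X_toy = np.asarray([[0., 10.], [1., 20.], [2., 30.], [3., 40.]])
    grid, axes = _grid_from_X(X_toy, percentiles=(0.05, 0.95),
                              grid_resolution=5)
    assert grid.shape == (16, 2)  # 4 unique values per column -> 4 * 4 points
    return grid, axes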
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
check_is_fitted(gbrt, 'estimators_')
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features_) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features_ - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of ints, strings, or tuples of ints or strings
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
If feature_names is specified and seq[i] is an int, seq[i]
must be < len(feature_names).
If seq[i] is a string, feature_names must be specified, and
seq[i] must be in feature_names.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
check_is_fitted(gbrt, 'estimators_')
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features_ != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features_')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features_)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('All entries of features must be less than '
'len(feature_names) = {0}, got {1}.'
.format(len(feature_names), i))
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
sepehr125/pybrain | pybrain/rl/environments/cartpole/cartpole.py | 24 | 4817 | __author__ = 'Thomas Rueckstiess, [email protected]'
from matplotlib.mlab import rk4
from math import sin, cos
import time
from scipy import eye, matrix, random, asarray
from pybrain.rl.environments.graphical import GraphicalEnvironment
class CartPoleEnvironment(GraphicalEnvironment):
""" This environment implements the cart pole balancing benchmark, as stated in:
Riedmiller, Peters, Schaal: "Evaluation of Policy Gradient Methods and
Variants on the Cart-Pole Benchmark". ADPRL 2007.
It implements a set of differential equations, solved with a 4th order
Runge-Kutta method.
"""
indim = 1
outdim = 4
# some physical constants
g = 9.81
l = 0.5
mp = 0.1
mc = 1.0
dt = 0.02
randomInitialization = True
def __init__(self, polelength=None):
GraphicalEnvironment.__init__(self)
if polelength != None:
self.l = polelength
# initialize the environment (randomly)
self.reset()
self.action = 0.0
self.delay = False
def getSensors(self):
""" returns the state one step (dt) ahead in the future. stores the state in
self.sensors because it is needed for the next calculation. The sensor return
vector has 4 elements: theta, theta', s, s' (s being the distance from the
origin).
"""
return asarray(self.sensors)
def performAction(self, action):
""" stores the desired action for the next runge-kutta step.
"""
self.action = action
self.step()
def step(self):
self.sensors = rk4(self._derivs, self.sensors, [0, self.dt])
self.sensors = self.sensors[-1]
if self.hasRenderer():
self.getRenderer().updateData(self.sensors)
if self.delay:
time.sleep(0.05)
def reset(self):
""" re-initializes the environment, setting the cart back in a random position.
"""
if self.randomInitialization:
angle = random.uniform(-0.2, 0.2)
pos = random.uniform(-0.5, 0.5)
else:
angle = -0.2
pos = 0.2
self.sensors = (angle, 0.0, pos, 0.0)
def _derivs(self, x, t):
""" This function is needed for the Runge-Kutta integration approximation method. It calculates the
        derivatives of the state variables given in x. For each variable in x, it returns the first-order
derivative at time t.
"""
F = self.action
(theta, theta_, _s, s_) = x
u = theta_
sin_theta = sin(theta)
cos_theta = cos(theta)
mp = self.mp
mc = self.mc
l = self.l
u_ = (self.g * sin_theta * (mc + mp) - (F + mp * l * theta_ ** 2 * sin_theta) * cos_theta) / (4 / 3 * l * (mc + mp) - mp * l * cos_theta ** 2)
v = s_
v_ = (F - mp * l * (u_ * cos_theta - (theta_ ** 2 * sin_theta))) / (mc + mp)
return (u, u_, v, v_)
def getPoleAngles(self):
""" auxiliary access to just the pole angle(s), to be used by BalanceTask """
return [self.sensors[0]]
def getCartPosition(self):
""" auxiliary access to just the cart position, to be used by BalanceTask """
return self.sensors[2]
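# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A minimal open-loop rollout of the environment above: apply a constant force
# for a few Runge-Kutta steps and record the pole angle. The force value and
# number of steps are arbitrary, chosen only for illustration.
def _example_cartpole_rollout(steps=10, force=0.5):
    env = CartPoleEnvironment()
    angles = []
    for _ in range(steps):
        env.performAction(force)
        angles.append(env.getPoleAngles()[0])
    return angles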
class CartPoleLinEnvironment(CartPoleEnvironment):
""" This is a linearized implementation of the cart-pole system, as described in
Peters J, Vijayakumar S, Schaal S (2003) Reinforcement learning for humanoid robotics.
Polelength is fixed, the order of sensors has been changed to the above."""
tau = 1. / 60. # sec
def __init__(self, **kwargs):
CartPoleEnvironment.__init__(self, **kwargs)
nu = 13.2 # sec^-2
tau = self.tau
# linearized movement equations
self.A = matrix(eye(4))
self.A[0, 1] = tau
self.A[2, 3] = tau
self.A[1, 0] = nu * tau
self.b = matrix([0.0, nu * tau / 9.80665, 0.0, tau])
def step(self):
self.sensors = random.normal(loc=self.sensors * self.A + self.action * self.b, scale=0.001).flatten()
if self.hasRenderer():
self.getRenderer().updateData(self.sensors)
if self.delay:
time.sleep(self.tau)
def reset(self):
""" re-initializes the environment, setting the cart back in a random position.
"""
self.sensors = random.normal(scale=0.1, size=4)
def getSensors(self):
return self.sensors.flatten()
def getPoleAngles(self):
""" auxiliary access to just the pole angle(s), to be used by BalanceTask """
return [self.sensors[0]]
def getCartPosition(self):
""" auxiliary access to just the cart position, to be used by BalanceTask """
return self.sensors[2]
| bsd-3-clause |
hsiaoyi0504/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
ueshin/apache-spark | python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py | 18 | 20955 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.rdd import PythonEvalType
from pyspark.sql import Row
from pyspark.sql.functions import array, explode, col, lit, mean, sum, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.types import ArrayType, TimestampType
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return float(v + 1)
return plus_one
@property
def pandas_scalar_plus_two(self):
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
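    # Illustrative sketch (editor's addition, not part of the original test
    # suite): the minimal grouped-aggregate pattern exercised by the cases
    # below, written out once as a reference. Column names ('id', 'v') follow
    # self.data above; the alias 'min_v' is arbitrary. Not collected by the
    # unittest runner because its name does not start with 'test'.
    def _example_grouped_agg(self):
        agg_min = pandas_udf(lambda v: v.min(), 'double',
                             PandasUDFType.GROUPED_AGG)
        return self.data.groupby('id').agg(agg_min(col('v')).alias('min_v'))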
def test_manual(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_basic(self):
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
assert_frame_equal(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
assert_frame_equal(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
assert_frame_equal(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegex(NotImplementedError, 'not supported'):
@pandas_udf(ArrayType(TimestampType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use
        # scalar pandas UDF in groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
def test_complex_groupby(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v)).sort('plus_one(id)')
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v)).sort('plus_one(id)')
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
# groupby one expression and one python UDF
result6 = (df.groupby(df.v % 2, plus_one(df.id))
.agg(sum_udf(df.v)).sort(['(v % 2)', 'plus_one(id)']))
expected6 = (df.groupby(df.v % 2, plus_one(df.id))
.agg(sum(df.v)).sort(['(v % 2)', 'plus_one(id)']))
# groupby one expression and one scalar pandas UDF
result7 = (df.groupby(df.v % 2, plus_two(df.id))
.agg(sum_udf(df.v)).sort(['sum(v)', 'plus_two(id)']))
expected7 = (df.groupby(df.v % 2, plus_two(df.id))
.agg(sum(df.v)).sort(['sum(v)', 'plus_two(id)']))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
assert_frame_equal(expected2.toPandas(), result2.toPandas())
assert_frame_equal(expected3.toPandas(), result3.toPandas())
assert_frame_equal(expected4.toPandas(), result4.toPandas())
assert_frame_equal(expected5.toPandas(), result5.toPandas())
assert_frame_equal(expected6.toPandas(), result6.toPandas())
assert_frame_equal(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
        # Test complex expressions with sql expression, scalar pandas UDF and
# group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort(['id', '(v % 2)'])
.toPandas().sort_values(by=['id', '(v % 2)']))
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
assert_frame_equal(expected1, result1)
assert_frame_equal(expected2, result2)
assert_frame_equal(expected3, result3)
def test_retain_group_columns(self):
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
assert_frame_equal(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEqual(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegex(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
def test_register_vectorized_udf_basic(self):
sum_pandas_udf = pandas_udf(
lambda v: v.sum(), "integer", PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
self.assertEqual(sum_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
group_agg_pandas_udf = self.spark.udf.register("sum_pandas_udf", sum_pandas_udf)
self.assertEqual(group_agg_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
q = "SELECT sum_pandas_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
actual = sorted(map(lambda r: r[0], self.spark.sql(q).collect()))
expected = [1, 5]
self.assertEqual(actual, expected)
def test_grouped_with_empty_partition(self):
data = [Row(id=1, x=2), Row(id=1, x=3), Row(id=2, x=4)]
expected = [Row(id=1, sum=5), Row(id=2, x=4)]
num_parts = len(data) + 1
df = self.spark.createDataFrame(self.sc.parallelize(data, numSlices=num_parts))
f = pandas_udf(lambda x: x.sum(),
'int', PandasUDFType.GROUPED_AGG)
result = df.groupBy('id').agg(f(df['x']).alias('sum')).collect()
self.assertEqual(result, expected)
def test_grouped_without_group_by_clause(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def max_udf(v):
return v.max()
df = self.spark.range(0, 100)
self.spark.udf.register('max_udf', max_udf)
with self.tempView("table"):
df.createTempView('table')
agg1 = df.agg(max_udf(df['id']))
agg2 = self.spark.sql("select max_udf(id) from table")
assert_frame_equal(agg1.toPandas(), agg2.toPandas())
def test_no_predicate_pushdown_through(self):
# SPARK-30921: We should not pushdown predicates of PythonUDFs through Aggregate.
import numpy as np
@pandas_udf('float', PandasUDFType.GROUPED_AGG)
def mean(x):
return np.mean(x)
df = self.spark.createDataFrame([
Row(id=1, foo=42), Row(id=2, foo=1), Row(id=2, foo=2)
])
agg = df.groupBy('id').agg(mean('foo').alias("mean"))
filtered = agg.filter(agg['mean'] > 40.0)
assert(filtered.collect()[0]["mean"] == 42.0)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_grouped_agg import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
poryfly/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
lambday/shogun | applications/easysvm/esvm/plots.py | 7 | 6555 | """
This module contains code for commonly used plots
"""
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Soeren Sonnenburg
import sys
import random
import numpy
import warnings
import shutil
from shogun import Labels
from shogun import *
def plotroc(output, LTE, draw_random=False, figure_fname="", roc_label='ROC'):
"""Plot the receiver operating characteristic curve"""
import pylab
import matplotlib
pylab.figure(1,dpi=150,figsize=(4,4))
fontdict=dict(family="cursive",weight="bold",size=7,y=1.05) ;
pm=PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
points=pm.get_ROC()
points=numpy.array(points).T # for pylab.plot
pylab.plot(points[0], points[1], 'b-', label=roc_label)
if draw_random:
pylab.plot([0, 1], [0, 1], 'r-', label='random guessing')
pylab.axis([0, 1, 0, 1])
ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
pylab.xticks(ticks,size=10)
pylab.yticks(ticks,size=10)
pylab.xlabel('1 - specificity (false positive rate)',size=10)
pylab.ylabel('sensitivity (true positive rate)',size=10)
pylab.legend(loc='lower right', prop = matplotlib.font_manager.FontProperties('tiny'))
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
auROC=pm.get_auROC()
return auROC ;
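# --- Illustrative sketch (editor's addition, not part of the original file) ---
# Hypothetical call of plotroc above on toy classifier outputs and labels; the
# scores, labels and output file name are made up for illustration only, and
# the call assumes this module's own (legacy) shogun bindings are available.
def _example_plotroc():
    scores = [0.9, 0.8, -0.3, 0.1, -0.7, -0.5]
    labels = [1.0, 1.0, 1.0, -1.0, -1.0, -1.0]
    return plotroc(scores, labels, draw_random=True,
                   figure_fname='roc_example.png', roc_label='toy ROC')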
def plotprc(output, LTE, figure_fname="", prc_label='PRC'):
"""Plot the precision recall curve"""
import pylab
import matplotlib
pylab.figure(2,dpi=150,figsize=(4,4))
pm=PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
points=pm.get_PRC()
points=numpy.array(points).T # for pylab.plot
pylab.plot(points[0], points[1], 'b-', label=prc_label)
pylab.axis([0, 1, 0, 1])
ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
pylab.xticks(ticks,size=10)
pylab.yticks(ticks,size=10)
pylab.xlabel('sensitivity (true positive rate)',size=10)
pylab.ylabel('precision (1 - false discovery rate)',size=10)
pylab.legend(loc='lower right')
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
auPRC=pm.get_auPRC()
return auPRC ;
def plotcloud(cloud, figure_fname="", label='cloud'):
"""Plot the cloud of points (the first two dimensions only)"""
import pylab
import matplotlib
pylab.figure(1,dpi=150,figsize=(4,4))
pos = []
neg = []
for i in xrange(len(cloud)):
if cloud[i][0]==1:
pos.append(cloud[i][1:])
elif cloud[i][0]==-1:
neg.append(cloud[i][1:])
fontdict=dict(family="cursive",weight="bold",size=10,y=1.05) ;
pylab.title(label, fontdict)
points=numpy.array(pos).T # for pylab.plot
pylab.plot(points[0], points[1], 'b+', label='positive')
points=numpy.array(neg).T # for pylab.plot
pylab.plot(points[0], points[1], 'rx', label='negative')
#pylab.axis([0, 1, 0, 1])
#ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
#pylab.xticks(ticks,size=10)
#pylab.yticks(ticks,size=10)
pylab.xlabel('dimension 1',size=10)
pylab.ylabel('dimension 2',size=10)
pylab.legend(loc='lower right')
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
def plot_poims(poimfilename, poim, max_poim, diff_poim, poim_totalmass, poimdegree, max_len):
"""Plot a summary of the information in poims"""
import pylab
import matplotlib
pylab.figure(3, dpi=150, figsize=(4,5))
# summary figures
fontdict=dict(family="cursive",weight="bold",size=7,y=1.05) ;
pylab.subplot(3,2,1)
pylab.title('Total POIM Mass', fontdict)
pylab.plot(poim_totalmass) ;
pylab.ylabel('weight mass', size=5)
pylab.subplot(3,2,3)
pylab.title('POIMs', fontdict)
pylab.pcolor(max_poim, shading='flat') ;
pylab.subplot(3,2,5)
pylab.title('Differential POIMs', fontdict)
pylab.pcolor(diff_poim, shading='flat') ;
for plot in [3, 5]:
pylab.subplot(3,2,plot)
ticks=numpy.arange(1., poimdegree+1, 1, dtype=numpy.float64)
ticks_str = []
for i in xrange(0, poimdegree):
ticks_str.append("%i" % (i+1))
ticks[i] = i + 0.5
pylab.yticks(ticks, ticks_str)
pylab.ylabel('degree', size=5)
# per k-mer figures
fontdict=dict(family="cursive",weight="bold",size=7,y=1.04) ;
# 1-mers
pylab.subplot(3,2,2)
pylab.title('1-mer Positional Importance', fontdict)
pylab.pcolor(poim[0], shading='flat') ;
ticks_str = ['A', 'C', 'G', 'T']
ticks = [0.5, 1.5, 2.5, 3.5]
pylab.yticks(ticks, ticks_str, size=5)
pylab.axis([0, max_len, 0, 4])
# 2-mers
pylab.subplot(3,2,4)
pylab.title('2-mer Positional Importance', fontdict)
pylab.pcolor(poim[1], shading='flat') ;
i=0 ;
ticks=[] ;
ticks_str=[] ;
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
ticks_str.append(l1+l2)
ticks.append(0.5+i) ;
i+=1 ;
pylab.yticks(ticks, ticks_str, fontsize=5)
pylab.axis([0, max_len, 0, 16])
# 3-mers
pylab.subplot(3,2,6)
pylab.title('3-mer Positional Importance', fontdict)
pylab.pcolor(poim[2], shading='flat') ;
i=0 ;
ticks=[] ;
ticks_str=[] ;
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
for l3 in ['A', 'C', 'G', 'T']:
if numpy.mod(i,4)==0:
ticks_str.append(l1+l2+l3)
ticks.append(0.5+i) ;
i+=1 ;
pylab.yticks(ticks, ticks_str, fontsize=5)
pylab.axis([0, max_len, 0, 64])
# x-axis on last two figures
for plot in [5, 6]:
pylab.subplot(3,2,plot)
pylab.xlabel('sequence position', size=5)
# finishing up
for plot in xrange(0,6):
pylab.subplot(3,2,plot+1)
pylab.xticks(fontsize=5)
for plot in [1,3,5]:
pylab.subplot(3,2,plot)
pylab.yticks(fontsize=5)
pylab.subplots_adjust(hspace=0.35) ;
# write to file
warnings.filterwarnings('ignore','Could not match*')
pylab.savefig('/tmp/temppylabfig.png')
shutil.move('/tmp/temppylabfig.png',poimfilename)
| bsd-3-clause |