repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
habrman/FaceRecognition | main.py | 1 | 10041 | from sklearn.metrics.pairwise import pairwise_distances
from tensorflow.python.platform import gfile
import tensorflow as tf
import numpy as np
import detect_and_align
import argparse
import easygui
import time
import cv2
import os
class IdData:
"""Keeps track of known identities and calculates id matches"""
def __init__(
self, id_folder, mtcnn, sess, embeddings, images_placeholder, phase_train_placeholder, distance_treshold
):
print("Loading known identities: ", end="")
self.distance_treshold = distance_treshold
self.id_folder = id_folder
self.mtcnn = mtcnn
self.id_names = []
self.embeddings = None
image_paths = []
os.makedirs(id_folder, exist_ok=True)
ids = os.listdir(os.path.expanduser(id_folder))
if not ids:
return
for id_name in ids:
id_dir = os.path.join(id_folder, id_name)
image_paths = image_paths + [os.path.join(id_dir, img) for img in os.listdir(id_dir)]
print("Found %d images in id folder" % len(image_paths))
aligned_images, id_image_paths = self.detect_id_faces(image_paths)
feed_dict = {images_placeholder: aligned_images, phase_train_placeholder: False}
self.embeddings = sess.run(embeddings, feed_dict=feed_dict)
if len(id_image_paths) < 5:
self.print_distance_table(id_image_paths)
def add_id(self, embedding, new_id, face_patch):
if self.embeddings is None:
self.embeddings = np.atleast_2d(embedding)
else:
self.embeddings = np.vstack([self.embeddings, embedding])
self.id_names.append(new_id)
id_folder = os.path.join(self.id_folder, new_id)
os.makedirs(id_folder, exist_ok=True)
filenames = [s.split(".")[0] for s in os.listdir(id_folder)]
numbered_filenames = [int(f) for f in filenames if f.isdigit()]
img_number = max(numbered_filenames) + 1 if numbered_filenames else 0
cv2.imwrite(os.path.join(id_folder, f"{img_number}.jpg"), face_patch)
def detect_id_faces(self, image_paths):
aligned_images = []
id_image_paths = []
for image_path in image_paths:
image = cv2.imread(os.path.expanduser(image_path), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
face_patches, _, _ = detect_and_align.detect_faces(image, self.mtcnn)
if len(face_patches) > 1:
print(
"Warning: Found multiple faces in id image: %s" % image_path
+ "\nMake sure to only have one face in the id images. "
+ "If that's the case then it's a false positive detection and"
+ " you can solve it by increasing the thresolds of the cascade network"
)
aligned_images = aligned_images + face_patches
id_image_paths += [image_path] * len(face_patches)
path = os.path.dirname(image_path)
self.id_names += [os.path.basename(path)] * len(face_patches)
return np.stack(aligned_images), id_image_paths
def print_distance_table(self, id_image_paths):
"""Prints distances between id embeddings"""
distance_matrix = pairwise_distances(self.embeddings, self.embeddings)
image_names = [path.split("/")[-1] for path in id_image_paths]
print("Distance matrix:\n{:20}".format(""), end="")
[print("{:20}".format(name), end="") for name in image_names]
for path, distance_row in zip(image_names, distance_matrix):
print("\n{:20}".format(path), end="")
for distance in distance_row:
print("{:20}".format("%0.3f" % distance), end="")
print()
def find_matching_ids(self, embs):
if self.id_names:
matching_ids = []
matching_distances = []
distance_matrix = pairwise_distances(embs, self.embeddings)
for distance_row in distance_matrix:
min_index = np.argmin(distance_row)
if distance_row[min_index] < self.distance_treshold:
matching_ids.append(self.id_names[min_index])
matching_distances.append(distance_row[min_index])
else:
matching_ids.append(None)
matching_distances.append(None)
else:
matching_ids = [None] * len(embs)
matching_distances = [np.inf] * len(embs)
return matching_ids, matching_distances
def load_model(model):
model_exp = os.path.expanduser(model)
if os.path.isfile(model_exp):
print("Loading model filename: %s" % model_exp)
with gfile.FastGFile(model_exp, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name="")
else:
raise ValueError("Specify model file, not directory!")
def main(args):
with tf.Graph().as_default():
with tf.Session() as sess:
# Setup models
mtcnn = detect_and_align.create_mtcnn(sess, None)
load_model(args.model)
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
# Load anchor IDs
id_data = IdData(
args.id_folder[0], mtcnn, sess, embeddings, images_placeholder, phase_train_placeholder, args.threshold
)
cap = cv2.VideoCapture(0)
frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
show_landmarks = False
show_bb = False
show_id = True
show_fps = False
frame_detections = None
while True:
start = time.time()
_, frame = cap.read()
# Locate faces and landmarks in frame
face_patches, padded_bounding_boxes, landmarks = detect_and_align.detect_faces(frame, mtcnn)
if len(face_patches) > 0:
face_patches = np.stack(face_patches)
feed_dict = {images_placeholder: face_patches, phase_train_placeholder: False}
embs = sess.run(embeddings, feed_dict=feed_dict)
matching_ids, matching_distances = id_data.find_matching_ids(embs)
frame_detections = {"embs": embs, "bbs": padded_bounding_boxes, "frame": frame.copy()}
print("Matches in frame:")
for bb, landmark, matching_id, dist in zip(
padded_bounding_boxes, landmarks, matching_ids, matching_distances
):
if matching_id is None:
matching_id = "Unknown"
print("Unknown! Couldn't fint match.")
else:
print("Hi %s! Distance: %1.4f" % (matching_id, dist))
if show_id:
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, matching_id, (bb[0], bb[3]), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
if show_bb:
cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (255, 0, 0), 2)
if show_landmarks:
for j in range(5):
size = 1
top_left = (int(landmark[j]) - size, int(landmark[j + 5]) - size)
bottom_right = (int(landmark[j]) + size, int(landmark[j + 5]) + size)
cv2.rectangle(frame, top_left, bottom_right, (255, 0, 255), 2)
else:
print("Couldn't find a face")
end = time.time()
seconds = end - start
fps = round(1 / seconds, 2)
if show_fps:
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame, str(fps), (0, int(frame_height) - 5), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
cv2.imshow("frame", frame)
key = cv2.waitKey(1)
if key == ord("q"):
break
elif key == ord("l"):
show_landmarks = not show_landmarks
elif key == ord("b"):
show_bb = not show_bb
elif key == ord("i"):
show_id = not show_id
elif key == ord("f"):
show_fps = not show_fps
elif key == ord("s") and frame_detections is not None:
for emb, bb in zip(frame_detections["embs"], frame_detections["bbs"]):
patch = frame_detections["frame"][bb[1] : bb[3], bb[0] : bb[2], :]
cv2.imshow("frame", patch)
cv2.waitKey(1)
new_id = easygui.enterbox("Who's in the image? Leave empty for non-valid")
if new_id:  # easygui.enterbox returns None when cancelled; skip empty or cancelled input
id_data.add_id(emb, new_id, patch)
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("model", type=str, help="Path to model protobuf (.pb) file")
parser.add_argument("id_folder", type=str, nargs="+", help="Folder containing ID folders")
parser.add_argument("-t", "--threshold", type=float, help="Distance threshold defining an id match", default=1.0)
main(parser.parse_args())
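# Hedged usage sketch (hypothetical paths): run the webcam demo with a frozen
# face-embedding protobuf model and a folder of per-identity subfolders, e.g.
#   python main.py model.pb ids/ --threshold 1.0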
| mit |
ChinaQuants/zipline | zipline/utils/munge.py | 29 | 2299 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas.core.common as com
def _interpolate(values, method, axis=None):
if values.ndim == 1:
axis = 0
elif values.ndim == 2:
axis = 1
else:
raise Exception("Cannot interpolate array with more than 2 dims")
values = values.copy()
values = interpolate_2d(values, method, axis=axis)
return values
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None):
"""
Copied from pandas 0.15.2. This did not exist in 0.12.0.
Differences:
- Don't depend on pad_2d and backfill_2d to return values
- Removed dtype kwarg. 0.12.0 did not have this option.
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = com.mask_missing(transf(values), fill_value)
# Note: pad_2d and backfill_2d work inplace in 0.12.0 and 0.15.2
# in 0.15.2 they also return a reference to values
if method == 'pad':
com.pad_2d(transf(values), limit=limit, mask=mask)
else:
com.backfill_2d(transf(values), limit=limit, mask=mask)
# reshape back
if ndim == 1:
values = values[0]
return values
def ffill(values, axis=None):
return _interpolate(values, 'pad', axis=axis)
def bfill(values, axis=None):
return _interpolate(values, 'bfill', axis=axis)
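# A minimal usage sketch (not part of the original module), assuming an old
# pandas (~0.12-0.15.x) where pandas.core.common still exposes pad_2d/backfill_2d.
if __name__ == '__main__':
    import numpy as np
    demo = np.array([1.0, np.nan, np.nan, 4.0])
    print(ffill(demo))  # expected: [ 1.  1.  1.  4.]
    print(bfill(demo))  # expected: [ 1.  4.  4.  4.]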
| apache-2.0 |
oemof/examples | oemof_examples/oemof.solph/v0.4.x/start_and_shutdown_costs/startup_shutdown.py | 1 | 2785 | # -*- coding: utf-8 -*-
"""
General description
-------------------
Example that illustrates how to model startup and shutdown costs attributed
to a binary flow.
Installation requirements
-------------------------
This example requires oemof.solph v0.4.x. Install by:
pip install 'oemof.solph>=0.4,<0.5'
"""
__copyright__ = "oemof developer group"
__license__ = "GPLv3"
import os
import pandas as pd
from oemof import solph
from oemof.network.network import Node
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
# read sequence data
full_filename = os.path.join(os.getcwd(), "data.csv")
data = pd.read_csv(full_filename, sep=",")
# select periods
periods = len(data) - 1
# create an energy system
idx = pd.date_range("1/1/2017", periods=periods, freq="H")
es = solph.EnergySystem(timeindex=idx)
Node.registry = es
# power bus and components
bel = solph.Bus(label="bel")
demand_el = solph.Sink(
label="demand_el",
inputs={bel: solph.Flow(fix=data["demand_el"], nominal_value=10)},
)
# pp1 and pp2 are competing to serve overall 12 units load at lowest cost
# summed costs for pp1 = 12 * 10 * 10.25 = 1230
# summed costs for pp2 = 4*5 + 4*5 + 12 * 10 * 10 = 1240
# => pp1 serves the load despite of higher variable costs since
# the start and shutdown costs of pp2 change its marginal costs
pp1 = solph.Source(
label="power_plant1",
outputs={bel: solph.Flow(nominal_value=10, variable_costs=10.25)},
)
# shutdown costs only work in combination with a minimum load
# since otherwise the status variable is "allowed" to be active i.e.
# it permanently has a value of one which does not allow to set the shutdown
# variable which is set to one if the status variable changes from one to zero
pp2 = solph.Source(
label="power_plant2",
outputs={
bel: solph.Flow(
nominal_value=10,
min=0.5,
max=1.0,
variable_costs=10,
nonconvex=solph.NonConvex(startup_costs=5, shutdown_costs=5),
)
},
)
# create an optimization problem and solve it
om = solph.Model(es)
# debugging
# om.write('problem.lp', io_options={'symbolic_solver_labels': True})
# solve model
om.solve(solver="cbc", solve_kwargs={"tee": True})
# create result object
results = solph.processing.results(om)
# plot data
if plt is not None:
# plot electrical bus
data = solph.views.node(results, "bel")["sequences"]
data[(("bel", "demand_el"), "flow")] *= -1
columns = [
c
for c in data.columns
if not any(s in c for s in ["status", "startup", "shutdown"])
]
data = data[columns]
ax = data.plot(kind="line", drawstyle="steps-post", grid=True, rot=0)
ax.set_xlabel("Hour")
ax.set_ylabel("P (MW)")
plt.show()
| gpl-3.0 |
HoliestCow/ece692_deeplearning | project3/dae_svm_classifier.py | 1 | 7025 |
from tflearn.data_augmentation import ImageAugmentation
from tflearn.data_preprocessing import ImagePreprocessing
# import glob
# from sklearn import svm
from sklearn.ensemble import BaggingClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
# from itertools import compress
import numpy as np
import os.path
import datasets
import time
def onehot_labels(labels):
return np.eye(10)[labels]
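# e.g. onehot_labels(np.array([0, 2])) picks rows of the 10x10 identity matrix:
# [[1,0,0,0,0,0,0,0,0,0], [0,0,1,0,0,0,0,0,0,0]]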
def unpickle(file):
# import cPickle
import pickle as cPickle
fo = open(file, 'rb')
# dict = cPickle.load(fo)
dict = cPickle.load(fo, encoding='bytes')
fo.close()
return dict
def get_proper_images(raw):
raw_float = np.array(raw, dtype=float)
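# CIFAR-10 stores each image as 3072 values in channel-major (3x32x32) order;
# reshape and transpose below to 32x32x3 (NHWC) for the feature extractors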
images = raw_float.reshape([-1, 3, 32, 32])
images = images.transpose([0, 2, 3, 1])
return images
def get_data():
data_norm = True
data_augmentation = True
data1 = unpickle('../cifar-10-batches-py/data_batch_1')
data2 = unpickle('../cifar-10-batches-py/data_batch_2')
data3 = unpickle('../cifar-10-batches-py/data_batch_3')
data4 = unpickle('../cifar-10-batches-py/data_batch_4')
data5 = unpickle('../cifar-10-batches-py/data_batch_5')
# print(list(data1.keys()))
# X = np.concatenate((get_proper_images(data1['data']),
# get_proper_images(data2['data']),
# get_proper_images(data3['data']),
# get_proper_images(data4['data']),
# get_proper_images(data5['data'])))
X = np.concatenate((get_proper_images(data1[b'data']),
get_proper_images(data2[b'data']),
get_proper_images(data3[b'data']),
get_proper_images(data4[b'data']),
get_proper_images(data5[b'data'])))
# Y = np.concatenate((onehot_labels(data1['labels']),
# onehot_labels(data2['labels']),
# onehot_labels(data3['labels']),
# onehot_labels(data4['labels']),
# onehot_labels(data5['labels'])))
Y = np.concatenate((onehot_labels(data1[b'labels']),
onehot_labels(data2[b'labels']),
onehot_labels(data3[b'labels']),
onehot_labels(data4[b'labels']),
onehot_labels(data5[b'labels'])))
# X_test = get_proper_images(unpickle('../cifar-10-batches-py/test_batch')['data'])
# Y_test = onehot_labels(unpickle('../cifar-10-batches-py/test_batch')['labels'])
X_test = get_proper_images(unpickle('../cifar-10-batches-py/test_batch')[b'data'])
Y_test = onehot_labels(unpickle('../cifar-10-batches-py/test_batch')[b'labels'])
img_prep = ImagePreprocessing()
if data_norm:
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
img_aug = ImageAugmentation()
if data_augmentation:
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=30.)
img_aug.add_random_crop((32, 32), 6)
return X, Y, X_test, Y_test, img_prep, img_aug
def main():
a = time.time()
# x, y, x_test, y_test, img_prep, img_aug = get_data()
#
# svm_y = np.zeros((y.shape[0], ), dtype=int)
# svm_y_test = np.zeros((y_test.shape[0]), dtype=int)
# for i in range(y.shape[0]):
# # print(y[i, :] == 1)
# mask = y[i, :] == 1
# meh = list(compress(range(len(mask)), mask))
# svm_y[i] = int(meh[0])
# for i in range(y_test.shape[0]):
# mask = y_test[i, :] == 1
# meh = list(compress(range(len(mask)), mask))
# svm_y_test[i] = int(meh[0])
# runs = ['sigmoid_sigmoid_256',
# 'sigmoid_sigmoid_crossentropy_256',
# 'sigmoid_sigmoid_gaussiannoise_256',
# 'sigmoid_tanh_512',
# 'relu_relu_256']
# runs = ['sigmoid_sigmoid_snp_0.1_512',
# 'sigmoid_sigmoid_snp_0.2_512',
# 'sigmoid_sigmoid_snp_0.3_512',
# 'sigmoid_sigmoid_snp_0.4_512',
# 'sigmoid_sigmoid_snp_0.5_512']
# runs = ['sigmoid_sigmoid_mask_0.1_512',
# 'sigmoid_sigmoid_mask_0.2_512',
# 'sigmoid_sigmoid_mask_0.3_512',
# 'sigmoid_sigmoid_mask_0.4_512',
# 'sigmoid_sigmoid_mask_0.5_512',
# 'relu_relu_snp_0.4_512']
# runs = ['sigmoid_sigmoid_gaussian_0.4_512']
runs = ['forcnn_sigmoid_sigmoid_snp_0.4_675']
print('time required to fix the answers {}'.format(time.time() - a))
# feature_generator = DNN(features, session=network.session)
# if len(glob.glob('./data/dae/*train.npy')) != 1:
# svm_features = np.zeros((0, 512))
# for i in range(x.shape[0]):
# if i % 1000 == 0:
# print(i, svm_features.shape)
# chuckmein = x[i, :, :].reshape((1, x.shape[1], x.shape[2], x.shape[3]))
# svm_features = np.vstack((svm_features, feature_generator.predict(chuckmein)))
# np.save('./dae_svm_features.npy', svm_features)
# else:
# svm_features = np.load('./dae_svm_features.npy')
model_directory = './data/dae/'
encode_w_suffix = '-encw.npy'
encode_b_suffix = '-encbh.npy'
# decode_w = '-decw.npy'
# decode_b = '-decb.npy'
# train_suffix = '-forcnn_sigmoid_sigmoid_snp_0.4_675.npy'
train_suffix_answer = '-train-answers.npy'
# test_suffix = '-test.npy'
test_suffix_answer = '-test-answers.npy'
# validation_suffix = '-validate.npy'
x, y, x_test, y_test = datasets.load_cifar10_dataset('./cifar-10-batches-py', mode='supervised')
# y = onehot_labels(y)
# y_test = onehot_labels(y_test)
for item in runs:
# svm_features = np.load(os.path.join(model_directory, item + train_suffix))
# svm_features_test = np.load(os.path.join(model_directory, item + test_suffix))
encode_w = np.load(os.path.join(model_directory, item + encode_w_suffix))
encode_b = np.load(os.path.join(model_directory, item + encode_b_suffix))
encode = np.add(np.dot(x, encode_w), encode_b)
# svm_features = encode.reshape(x.shape[0], 3, 15, 15).transpose(0, 2, 3, 1)
svm_features = encode
encode = np.add(np.dot(x_test, encode_w), encode_b)
svm_features_test = encode
# svm_features_test = encode.reshape(x_test.shape[0], 3, 15, 15).transpose(0, 2, 3, 1)
# print(svm_features.shape, svm_features_test.shape, y.shape, y_test.shape)
# stop
n_estimators = 10
n_jobs = 4
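# Each of the n_estimators linear SVCs is bagged on 1/n_estimators of the data
# (max_samples=1.0/n_estimators), keeping kernel-SVC training tractable, and
# OneVsRestClassifier wraps the ensemble for the 10 CIFAR-10 classes.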
print('training svm')
start = time.time()
clf = OneVsRestClassifier(BaggingClassifier(
SVC(kernel='linear', probability=True, class_weight=None),
max_samples=1.0 / n_estimators, n_estimators=n_estimators, n_jobs=n_jobs))
clf.fit(svm_features, y)
end = time.time()
print("Bagging SVC", end - start, clf.score(svm_features_test, y_test))
return
main() | mit |
epfl-lts2/pygsp | pygsp/graphs/linegraph.py | 1 | 1635 | # -*- coding: utf-8 -*-
import numpy as np
from scipy import sparse
from pygsp import utils
from . import Graph # prevent circular import in Python < 3.5
logger = utils.build_logger(__name__)
class LineGraph(Graph):
r"""Build the line graph of a graph.
Each vertex of the line graph represents an edge in the original graph. Two
vertices are connected if the edges they represent share a vertex in the
original graph.
Parameters
----------
graph : :class:`Graph`
Examples
--------
>>> import matplotlib.pyplot as plt
>>> graph = graphs.Sensor(5, k=2, seed=10)
>>> line_graph = graphs.LineGraph(graph)
>>> fig, ax = plt.subplots()
>>> fig, ax = graph.plot('blue', edge_color='blue', indices=True, ax=ax)
>>> fig, ax = line_graph.plot('red', edge_color='red', indices=True, ax=ax)
>>> _ = ax.set_title('graph and its line graph')
"""
def __init__(self, graph, **kwargs):
if graph.is_weighted():
logger.warning('Your graph is weighted, and is considered '
'unweighted to build a binary line graph.')
graph.compute_differential_operator()
# incidence = np.abs(graph.D) # weighted?
incidence = (graph.D != 0)
adjacency = incidence.T.dot(incidence).astype(np.int)
adjacency -= sparse.identity(graph.n_edges, dtype=np.int)
try:
coords = incidence.T.dot(graph.coords) / 2
except AttributeError:
coords = None
super(LineGraph, self).__init__(adjacency, coords=coords,
plotting=graph.plotting, **kwargs)
| bsd-3-clause |
mne-tools/mne-tools.github.io | stable/_downloads/b44ad6c462a6840ee586cf7cce9e6efb/spm_faces_dataset_sgskip.py | 9 | 4727 | """
.. _ex-spm-faces:
==========================================
From raw data to dSPM on SPM Faces dataset
==========================================
Runs a full pipeline using MNE-Python:
- artifact removal
- averaging Epochs
- forward model computation
- source reconstruction using dSPM on the contrast : "faces - scrambled"
.. note:: This example does quite a bit of processing, so even on a
fast machine it can take several minutes to complete.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 10
import matplotlib.pyplot as plt
import mne
from mne.datasets import spm_face
from mne.preprocessing import ICA, create_eog_epochs
from mne import io, combine_evoked
from mne.minimum_norm import make_inverse_operator, apply_inverse
print(__doc__)
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
###############################################################################
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds'
raw = io.read_raw_ctf(raw_fname % 1, preload=True) # Take first run
# Here to save memory and time we'll downsample heavily -- this is not
# advised for real data as it can effectively jitter events!
raw.resample(120., npad='auto')
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 30, method='fir', fir_design='firwin')
events = mne.find_events(raw, stim_channel='UPPT001')
# plot the events to get an idea of the paradigm
mne.viz.plot_events(events, raw.info['sfreq'])
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.6
baseline = None # no baseline as high-pass is applied
reject = dict(mag=5e-12)
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject)
# Fit ICA, find and remove major artifacts
ica = ICA(n_components=0.95, max_iter='auto', random_state=0)
ica.fit(raw, decim=1, reject=reject)
# compute correlation scores, get bad indices sorted by score
eog_epochs = create_eog_epochs(raw, ch_name='MRT31-2908', reject=reject)
eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name='MRT31-2908')
ica.plot_scores(eog_scores, eog_inds) # see scores the selection is based on
ica.plot_components(eog_inds) # view topographic sensitivity of components
ica.exclude += eog_inds[:1] # we saw the 2nd ECG component looked too dipolar
ica.plot_overlay(eog_epochs.average()) # inspect artifact removal
ica.apply(epochs) # clean data, default in place
evoked = [epochs[k].average() for k in event_ids]
contrast = combine_evoked(evoked, weights=[-1, 1]) # Faces - scrambled
evoked.append(contrast)
for e in evoked:
e.plot(ylim=dict(mag=[-400, 400]))
plt.show()
# estimate noise covariance
noise_cov = mne.compute_covariance(epochs, tmax=0, method='shrunk',
rank=None)
###############################################################################
# Visualize fields on MEG helmet
# The transformation here was aligned using the dig-montage. It's included in
# the spm_faces dataset and is named SPM_dig_montage.fif.
trans_fname = data_path + ('/MEG/spm/SPM_CTF_MEG_example_faces1_3D_'
'raw-trans.fif')
maps = mne.make_field_map(evoked[0], trans_fname, subject='spm',
subjects_dir=subjects_dir, n_jobs=1)
evoked[0].plot_field(maps, time=0.170)
###############################################################################
# Look at the whitened evoked data
evoked[0].plot_white(noise_cov)
###############################################################################
# Compute forward model
src = data_path + '/subjects/spm/bem/spm-oct-6-src.fif'
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(contrast.info, trans_fname, src, bem)
###############################################################################
# Compute inverse solution
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'dSPM'
inverse_operator = make_inverse_operator(contrast.info, forward, noise_cov,
loose=0.2, depth=0.8)
# Compute inverse solution on contrast
stc = apply_inverse(contrast, inverse_operator, lambda2, method, pick_ori=None)
# stc.save('spm_%s_dSPM_inverse' % contrast.comment)
# Plot contrast in 3D with PySurfer if available
brain = stc.plot(hemi='both', subjects_dir=subjects_dir, initial_time=0.170,
views=['ven'], clim={'kind': 'value', 'lims': [3., 6., 9.]})
# brain.save_image('dSPM_map.png')
| bsd-3-clause |
GaZ3ll3/scikit-image | doc/examples/plot_template.py | 20 | 1663 | """
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on a single coin). Here, we
return a single match (the exact same coin), so the maximum value in the
``match_template`` result corresponds to the coin location. The other coins
look similar, and thus have local maxima; if you expect multiple matches, you
should use a proper peak-finding function.
The ``match_template`` function uses fast, normalized cross-correlation [1]_
to find instances of the template in the image. Note that the peaks in the
output of ``match_template`` correspond to the origin (i.e. top-left corner) of
the template.
.. [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and
Magic.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.coins()
coin = image[170:220, 75:130]
result = match_template(image, coin)
ij = np.unravel_index(np.argmax(result), result.shape)
x, y = ij[::-1]
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 3))
ax1.imshow(coin)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
hcoin, wcoin = coin.shape
rect = plt.Rectangle((x, y), wcoin, hcoin, edgecolor='r', facecolor='none')
ax2.add_patch(rect)
ax3.imshow(result)
ax3.set_axis_off()
ax3.set_title('`match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
plt.show()
| bsd-3-clause |
petosegan/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
abonil91/ncanda-data-integration | scripts/xnat/xnat_extractor.py | 1 | 12398 | #!/usr/bin/env python
##
## Copyright 2016 SRI International
## See COPYING file distributed along with the package for the copyright and license terms.
##
"""
NCANDA XNAT Extractor
Extract all experiment, scan, and reading data from NCANDA's XNAT server.
"""
__author__ = "Nolan Nichols <http://orcid.org/0000-0003-1099-3328>"
__modified__ = "2015-08-26"
import os
import glob
import json
import tempfile
import requests
import pandas as pd
from lxml import etree
# Verbose setting for cli
verbose = None
# Define global namespace for parsing XNAT XML files
ns = {'xnat': 'http://nrg.wustl.edu/xnat'}
# Define global format to be used in XNAT requests
return_format = '?format=csv'
def get_config(config_file):
"""
Get a json configuration in pyXNAT format
:param config_file: str
:return: dict
"""
path = os.path.abspath(config_file)
with open(path, 'rb') as fi:
config = json.load(fi)
config.update(api=config['server'] + '/data')
if verbose:
print("Getting configuration file: {0}".format(path))
return config
def get_collections(config):
"""
Get a dictionary of lambda functions that create collection URLs
:param config: dict
:return: dict
"""
server = config['api']
collections = dict(projects=lambda: server + '/projects',
subjects=lambda x: server + '/{0}/subjects'.format(x),
experiments=lambda: server + '/experiments')
if verbose:
print("Getting collections configuration...")
return collections
def get_entities(config):
"""
Get a dictionary of lambda functions that create entity URLs
:param config: dict
:return: dict
"""
server = config['api']
entities = dict(project=lambda x: server + '/projects/{0}'.format(x),
subject=lambda x: server + '/subjects/{0}'.format(x),
experiment=lambda x:
server + '/experiments/{0}'.format(x))
if verbose:
print("Getting entities configuration...")
return entities
def get_xnat_session(config):
"""
Get a requests.session instance from the config
:return: requests.session
"""
jsessionid = ''.join([config['api'], '/JSESSIONID'])
session = requests.session()
session.auth = (config['user'], config['password'])
session.get(jsessionid)
if verbose:
print("Getting an XNAT session using: {0}".format(jsessionid))
return session
def write_experiments(config, session):
"""
Write out a csv file representing all the experiments in the given XNAT
session.
:param config: dict
:param session: requests.session
:return: str
"""
experiments_filename = tempfile.mktemp()
collections = get_collections(config)
experiments = session.get(collections.get('experiments')() + return_format)
with open(experiments_filename, 'w') as fi:
fi.flush()
fi.write(experiments.text)
fi.close()
if verbose:
print("Writing list of experiment ids to temp: {0}".format(experiments_filename))
return experiments_filename
def extract_experiment_xml(config, session, experiment_dir, extract=None):
"""
Open an experiments csv file, then extract the XML representation,
and write it to disk.
:param config: dict
:param session: requests.session
:param experiment_dir: str
:param extract: int
:return: str
"""
entities = get_entities(config)
experiments_file = write_experiments(config, session)
# make sure the output directory exists and is empty
outdir = os.path.abspath(experiment_dir)
if not os.path.exists(outdir):
os.mkdir(outdir)
else:
[os.remove(f) for f in glob.glob(os.path.join(outdir, '*'))]
df_experiments = pd.read_csv(experiments_file)
if not extract:
if verbose:
print("Running XML extraction for all sessions: {0} Total".format(df_experiments.shape[0]))
extract = df_experiments.shape[0]
experiment_ids = df_experiments.ID[:extract]
experiment_files = list()
for idx, experiment_id in experiment_ids.iteritems():
experiment = session.get(entities.get('experiment')(experiment_id) + return_format)
experiment_file = os.path.join(outdir, '{0}.xml'.format(experiment_id))
experiment_files.append(experiment_file)
with open(experiment_file, 'w') as fi:
fi.flush()
fi.write(experiment.text)
fi.close()
if verbose:
num = idx + 1
print("Writing XML file {0} of {1} to: {2}".format(num, extract, experiment_file))
return experiment_files
def get_experiment_info(experiment_xml_file):
"""
Extract basic information from the experiment xml file and return a
dictionary
:param experiment_xml_file: str
:return: dict
"""
xml = etree.parse(experiment_xml_file)
root = xml.getroot()
site_experiment_id = root.attrib.get('label')
site_id = site_experiment_id[0:11]
site_experiment_date = site_experiment_id[12:20]
project = root.attrib.get('project')
experiment_id = root.attrib.get('ID')
experiment_date = root.find('./xnat:date', namespaces=ns).text
subject_id = root.find('./xnat:subject_ID', namespaces=ns).text
result = dict(site_id=site_id,
subject_id=subject_id,
site_experiment_id=site_experiment_id,
site_experiment_date=site_experiment_date,
project=project,
experiment_id=experiment_id,
experiment_date=experiment_date)
if verbose:
print("Parsed experiment info for: {0}".format(result))
return result
def get_experiments_dir_info(experiments_dir):
"""
Get a list of experiment dicts from all the experiment xml files in the
experiments directory
:param experiments_dir: str
:return: list
"""
results = list()
if os.path.exists(os.path.abspath(experiments_dir)):
glob_path = ''.join([os.path.abspath(experiments_dir), '/*'])
experiment_files = glob.glob(glob_path)
else:
experiment_files = list()
for path in experiment_files:
results.append(get_experiment_info(path))
return results
def get_scans_info(experiment_xml_file):
"""
Get a dict of dicts for each scan from an XNAT experiment XML document
:param experiment_xml_file: lxml.etree.Element
:return: list
"""
xml = etree.parse(experiment_xml_file)
root = xml.getroot()
experiment_id = root.attrib.get('ID')
result = list()
scans = root.findall('./xnat:scans/xnat:scan', namespaces=ns)
for scan in scans:
values = dict()
scan_id = scan.attrib.get('ID')
scan_type = scan.attrib.get('type')
# handle null finds
values.update(quality=scan.find('./xnat:quality', namespaces=ns))
values.update(series_description=scan.find(
'./xnat:series_description', namespaces=ns))
values.update(coil=scan.find('./xnat:coil', namespaces=ns))
values.update(field_strength=scan.find('./xnat:fieldStrength',
namespaces=ns))
for k, v in values.iteritems():
try:
values[k] = v.text
except AttributeError, e:
values[k] = None
if verbose:
print(e, "for attribute {0} in scan {1} of experiment {2}".format(k, scan_id, experiment_id))
scan_dict = dict(experiment_id=experiment_id,
scan_id=scan_id,
scan_type=scan_type,
quality=values.get('quality'),
series_description=values.get('series_description'),
coil=values.get('coil'),
field_strength=values.get('field_strength'))
result.append(scan_dict)
return result
def get_reading_info(experiment_xml_file):
"""
Get a dict of dicts for each reading from an XNAT experiment XML document
:param experiment_xml_file: lxml.etree.Element
:return: list
"""
xml = etree.parse(experiment_xml_file)
root = xml.getroot()
experiment_id = root.attrib.get('ID')
try:
note = root.find('./xnat:note', namespaces=ns).text
except AttributeError:
note = None
pass
result = dict(experiment_id=experiment_id,
note=note,
datetodvd=None,
findings=None,
findingsdate=None,
excludefromanalysis=None,
physioproblemoverride=None,
dtimismatchoverride=None,
phantommissingoverride=None)
values = dict()
fields = root.findall('./xnat:fields/xnat:field', namespaces=ns)
for field in fields:
name = field.attrib.get('name')
value = root.xpath('.//xnat:field[@name="{0}"]/text()'.format(name),
namespaces=ns)
# handle null finds
values[name] = value
for k, v in values.iteritems():
try:
values[k] = v[1]
except IndexError:
values[k] = None
result.update(values)
return result
def get_experiments_dir_reading_info(experiments_dir):
"""
Get a list of reading dicts from all the experiment xml files in the
experiments directory
:param experiments_dir: str
:return: list
"""
results = list()
if os.path.exists(os.path.abspath(experiments_dir)):
glob_path = ''.join([os.path.abspath(experiments_dir), '/*'])
experiment_files = glob.glob(glob_path)
else:
experiment_files = list()
for path in experiment_files:
results.append(get_reading_info(path))
return results
def get_experiments_dir_scan_info(experiments_dir):
"""
Get a list of scan dicts from all the experiment xml files in the
experiments directory
:param experiments_dir: str
:return: list
"""
results = list()
if os.path.exists(os.path.abspath(experiments_dir)):
glob_path = ''.join([os.path.abspath(experiments_dir), '/*'])
experiment_files = glob.glob(glob_path)
else:
experiment_files = list()
for path in experiment_files:
results.append(get_scans_info(path))
return results
def get_scans_by_type(scans, scan_type):
"""
Get scans based on their type
:param scans: dict
:param scan_type: str
:return:
"""
result = list()
for scan in scans:
if scan['scan_type'] == scan_type:
result.append(scan)
return result
def scans_to_dataframe(scans):
"""
Convert scan dict to a pandas.DataFrame
:param scans: dict
:return: pandas.DataFrame
"""
flat = [item for sublist in scans for item in sublist]
return pd.DataFrame(flat)
def experiments_to_dataframe(experiments):
"""
Convert a list of experiment dicts to a pandas.DataFrame
:param experiments: dict
:return: pandas.DataFrame
"""
return pd.DataFrame(experiments)
def reading_to_dataframe(reading):
"""
Convert a list of reading dicts to a pandas.DataFrame
:param reading: dict
:return: pandas.DataFrame
"""
return pd.DataFrame(reading)
def merge_experiments_scans_reading(experiments, scans, reading):
"""
Merge an experiments dataframe with a scan dataframe
:param experiments: dict
:param scans: dict
:return: pandas.DataFrame
"""
experiments_df = experiments_to_dataframe(experiments)
scans_df = scans_to_dataframe(scans)
reading_df = reading_to_dataframe(reading)
exp_scan = pd.merge(experiments_df, scans_df, how='inner')
merged = pd.merge(exp_scan, reading_df, how='inner')
# reindex using multi-index of subject, experiment, scan
result = merged.to_records(index=False)
idx = pd.MultiIndex.from_arrays([merged.subject_id.values,
merged.experiment_id.values,
merged.scan_id.values],
names=['subject_id',
'experiment_id',
'scan_id'])
return pd.DataFrame(result, index=idx)
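# A hedged end-to-end sketch (hypothetical config path and output directory),
# following the module's own call chain; it needs a reachable XNAT server.
if __name__ == "__main__":
    cfg = get_config('xnat_config.json')
    sess = get_xnat_session(cfg)
    extract_experiment_xml(cfg, sess, 'experiments', extract=5)
    experiments = get_experiments_dir_info('experiments')
    scans = get_experiments_dir_scan_info('experiments')
    reading = get_experiments_dir_reading_info('experiments')
    print(merge_experiments_scans_reading(experiments, scans, reading).head())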
| bsd-3-clause |
gdikos/qstk-on-ec2 | analyzer2.py | 1 | 3921 | import sys
import datetime as dt
import math
import numpy as np
import pandas as pd
# QSTK Imports
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.tsutil as tsu
def get_values_list(s_file_path):
df_values_list = pd.read_csv(s_file_path, sep=',', header=None)
df_values_list.columns = ["year", "month", "day", "total"]
return df_values_list
def get_values(df_values_list):
np_values_list = df_values_list.values
l_values = []
for value in np_values_list:
dt_date = dt.datetime(value[0], value[1], value[2], hour=16)
total = float(value[3])
l_values.append([dt_date, total])
np_values = np.array(l_values)
df_values = pd.DataFrame(np_values[:, 1], index=np_values[:, 0], columns=["val"])
return df_values
def get_data(ldt_timestamps, ls_symbols):
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
dataobj = da.DataAccess('Yahoo')
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method='ffill')
d_data[s_key] = d_data[s_key].fillna(method='bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
return d_data
def get_prices(ldt_timestamps, ls_symbols, s_key="close"):
# close = adjusted close
# actual_close = actual close
d_data = get_data(ldt_timestamps, ls_symbols)
return d_data[s_key]
def get_performance_indicators(df_data):
na_data = df_data.values
df_result = pd.DataFrame(index=["avg_daily_ret", "std_daily_ret", "sharpe_ratio", "total_ret"], \
columns=df_data.columns)
# Calculating the daily return
# It gets a copy of na_data becouse tsu.returnize0 does not return
# anything, the function changes the argument.
na_daily_ret = na_data.copy()
tsu.returnize0(na_daily_ret)
na_cum_ret = na_data / na_data[0, :]
for col in range(na_data.shape[1]):
df_result.ix["avg_daily_ret", col] = np.mean(na_daily_ret[:, col])
df_result.ix["std_daily_ret", col] = np.std(na_daily_ret[:, col])
df_result.ix["sharpe_ratio", col] = math.sqrt(252) * df_result.ix["avg_daily_ret", col] / df_result.ix["std_daily_ret", col]
df_result.ix["total_ret", col] = na_cum_ret[-1 , col]
return df_result
# print df_result
if __name__ == '__main__':
#print "start analyze.py"
# print
lookback=sys.argv[1]
holding=sys.argv[2]
trigger=sys.argv[3]
market=sys.argv[4]
switch = sys.argv[5]
switch2=sys.argv[6]
s_file_path = "data\\q2_values.csv"
ls_symbols =["IXIC"]
df_values_list = get_values_list(s_file_path)
df_values = get_values(df_values_list)
df_prices = get_prices(list(df_values.index), ls_symbols)
df_data = df_values.join(df_prices)
get_performance_indicators(df_data)
# print
# df_result=get_performance_indicators(df_data)
# print df_result
#score= df_result.ix["sharpe_ratio","val"]-df_result.ix["sharpe_ratio","FTSE.AT"]
#if (score >0):
# print "we have a winner:"
#print lookback, holding, trigger, market, switch,switch2,score
#else:
#print "this is a looser"
# print df_result.ix["sharpe_ratio","val"]-df_result.ix["sharpe_ratio","FTSE.AT"]
# print "end analize.py"
# print
df_result=get_performance_indicators(df_data)
# print df_result
score= df_result.ix["sharpe_ratio","val"]-df_result.ix["sharpe_ratio","IXIC"]
#
delta= df_result.ix["total_ret","val"]-df_result.ix["total_ret","IXIC"]
# if (score >0):
# print "we have a winner:"
print lookback, holding, trigger, market, switch,switch2,score,delta
# else:
# print "this is a looser"
# print df_result.ix["sharpe_ratio","val"]-df_result.ix["sharpe_ratio","FTSE.AT"]
# print "end analize.py"
| mit |
AlexRBaker/ScaffoldM | scaffoldm/compare.py | 1 | 45180 | #!/usr/bin/env python
###############################################################################
# #
# compare.py #
# #
# Makes comparisons between ScaffoldM and SSPACE for improvement #
# #
# Copyright (C) Alexander Baker #
# #
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "Alexander Baker"
__copyright__ = "Copyright 2015"
__credits__ = ["Alexander Baker"]
__license__ = "GPLv3"
__maintainer__ = "Alexander Baker"
__email__ = "[email protected]"
###############################################################################
###############################################################################
###############################################################################
import os
import argparse
#from dataloader import DataLoader
#from dataparser import DataParser
#from scaffold import Scaffold
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
###############################################################################
#First step - parse input to make library file for sspace_basic
#Library name readsfile1 readsfile2 insert size tolerated error, read orientation
#of form Lib1 file.1.1.fasta file.1.2.fasta 400 0.25 FR
# The line above indicates that library 1 has the first mate of each pair in file.1.1.fasta and the
# second mate in file.1.2.fasta, with an insert size of 400, a tolerated error of 25% (i.e. +/- 100 bp),
# and reads that map F(------>)R(<---------) onto the contigs.
def makelibrary(filename,libnames,pairedend1, pairedend2, insertsize,error,orientation,tab=False):
import os
import sys
if not os.path.isfile(filename):
scaffile=open(filename+".txt",'w')
scaffile.close()
with open(filename+".txt",'a+') as library:
for i,libname in enumerate(libnames):
if tab==False:
library.write("{0} bwa {1} {2} {3} {4} {5}\n".\
format(libname,pairedend1[i]+".fasta", pairedend2[i]+".fasta",\
insertsize[i],error[i],orientation[i]))
else:
library.write("{0} TAB {1} {2} {3} {4}\n".\
format(libname,TABfile+".tab",\
insertsize[i],error[i],orientation[i]))
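# Hedged example (hypothetical file names): write a single-library SSPACE
# library file equivalent to the "Lib1 ... 400 0.25 FR" line described above.
def _example_makelibrary():
    makelibrary("libraries", ["Lib1"], ["file.1.1"], ["file.1.2"],
                [400], [0.25], ["FR"])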
def splitter(interleavedreads):
import sys
try:
with open(interleavedreads+".fna",'r') as reads:
head=reads.readline()
if not head.startswith('>'):
raise TypeError("Not a FASTA file:")
reads.seek(0)
firstreads=[]
secondreads=[]
firstread=False
secondread=False
for line in reads:
if line.startswith('>'):
firstread=False
secondread=False
if ".1" in line:
firstread=True
elif ".2" in line:
secondread=True
if firstread:
firstreads.append(line)
elif secondread:
secondreads.append(line)
elif not line.startswith('>'):
if firstread:
firstreads.append(line)
elif secondread:
secondreads.append(line)
read1=''.join(firstreads)
read2=''.join(secondreads)
with open(interleavedreads+"_1.fasta",'a+') as reads:
reads.write(read1)
with open(interleavedreads+"_2.fasta",'a+') as reads:
reads.write(read2)
return interleavedreads+"_1",interleavedreads+"_2"
except:
print "Error opening file:", interleavedreads, sys.exc_info()[0]
raise
def chunker(string,chunksize,end):
''' Creates chunks from string and appends an end term
Note: this returns a generator and should be iterated over'''
try:
stringmod=string.translate(None,end)
for i in xrange(0,len(stringmod),chunksize):
if len(stringmod)>=i+chunksize:
yield stringmod[i:i+chunksize]+end
else:
yield stringmod[i:i+chunksize]
except TypeError:
print "end must be concatenable to string, \
intended use is type(str) for both"
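# Hedged usage sketch: re-wrap a sequence string into 70-character FASTA lines.
def _example_chunker(seq="ACGT" * 50):
    return "".join(chunker(seq, 70, "\n"))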
def getfastalen(fastaname):
try:
with open(fastaname+".fasta",'r+') as fasta:
fasta.readline()
linelen=len(fasta.readline().strip('\n'))
fasta.seek(0)
for i, l in enumerate(fasta):
pass
return (i)*linelen #lose 1 due to header_assuming one header for this type
except IOError:
with open(fastaname,'r+') as fasta:
fasta.readline()
linelen=len(fasta.readline().strip('\n'))
fasta.seek(0)
for i, l in enumerate(fasta):
pass
return (i)*linelen #lose 1 due to header_assuming one header for this type
def slicer(slices,filename):
''' slices are of form start, end, reps, orientation. This function will take those
slices out of a specified fasta file and write them out as contig FASTA records'''
import os
import sys
slices=[int(ele) for ele in slices]
#print slices
try:
with open(filename,'r+') as Genome:
linelen=len(Genome.readlines()[3].strip("\n"))
start=min([slices[i] for i in range(len(slices)) if i%4==0])
ends=max(slices) #assumes no reps or orientation greater than max contig position - reasonable
seqslice=[]
Genome.seek(0)
header=Genome.readline().strip('>').rstrip('\n')
refhead=header[0:min(24,len(header))]
sequence=''.join([line.translate(None,"\n") for line in Genome.readlines() if not line.startswith('>')])
for i in range(0,len(slices),4):
if int(slices[i+3])==-1:
seqslice.append(reversecompliment(sequence[int(slices[i]):int(slices[i+1])]*int(slices[i+2])))
elif int(slices[i+3])==1:
seqslice.append(sequence[int(slices[i]):int(slices[i+1])]*int(slices[i+2]))
except IOError: #If not openable try for file in folder one level up
with open('..'+os.sep+filename,'r+') as Genome:
linelen=len(Genome.readlines()[3].strip("\n"))
start=min([slices[i] for i in range(len(slices)) if i%4==0])
ends=max(slices) #assumes no reps or orientation greater than max contig position - reasonable
seqslice=[]
Genome.seek(0)
header=Genome.readline().strip('>').rstrip('\n')
refhead=header[0:min(24,len(header))]
sequence=''.join([line.translate(None,"\n") for line in Genome.readlines() if not line.startswith('>')])
for i in range(0,len(slices),4):
if int(slices[i+3])==-1:
seqslice.append(reversecompliment(sequence[int(slices[i]):int(slices[i+1])]*int(slices[i+2])))
elif int(slices[i+3])==1:
seqslice.append(sequence[int(slices[i]):int(slices[i+1])]*int(slices[i+2]))
parts=filename.split(os.sep)
fileend=parts[-1].split(".fasta")[0]
slicefilename=fileend+"slices.fna"
completefilename="{0}S:{1}_E:{2}".format(fileend,start,ends)+"complete.fna"
if not os.path.isfile(slicefilename):
tigfile=open(slicefilename,'w')
tigfile=tigfile.close()
if not os.path.isfile(completefilename):
tigfile=open(completefilename,'w')
tigfile=tigfile.close()
with open(slicefilename,'a+') as tigfile:
for i,seq in enumerate(seqslice):
tigname=refhead+"contig"+str(i+1)+"|"+header[int(min(24,len(header))):]
tigfile.write(">{0}, S:{1}:E:{2}:R:{3}:OR:{4}\n".format(tigname,slices[i*4],slices[i*4+1],slices[i*4+3],slices[i*4+2]))
for chunk in chunker(seq,linelen,"\n"):
tigfile.write(chunk)
tigfile.write('\n')
with open(completefilename,'a+') as tigfile:
tigname=refhead+"complete|"+header[min(24,len(header)):]
tigfile.write(">{0}, S:{1}:E:{2}\n".format(tigname,start,ends))
for chunk in chunker(sequence[start:ends],linelen,"\n"):
tigfile.write(chunk)
tigfile.write('\n')
return slicefilename,completefilename
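# Hedged example (hypothetical reference FASTA): cut two forward-oriented,
# single-copy contigs out of a genome, leaving a 2 kb gap between them.
def _example_slicer(reference="MG1655ref.fasta"):
    return slicer([0, 10000, 1, 1, 12000, 22000, 1, 1], reference)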
def reversecompliment(seq):
compliment={'A':'T','G':'C','T':'A','C':'G','a':'t','g':'c','t':'a','c':'g'}
try:
newseq="".join([compliment[char] for char in reversed(seq)])
return newseq
except:
print "This sequence has illegal characters"
raise
def randcuts(gap,seqlen,noslices=10,rep=False,ori=False,steps=100,gapvar=False,randgaps=True):
import random
starts=[]
ends=[]
orientation=[]
reps=[]
m=[1]*90+10*[-1]
replist=[1,2,3,4]
starts.append(random.randint(0,seqlen/4))
upperlen=(seqlen-starts[0])//noslices
for i in range(noslices):
ends.append(random.randint(starts[i]+steps,starts[i]+upperlen+steps))
if i!=(noslices-1):
if randgaps:
starts.append(ends[i]+int(random.gauss(gap,gap/5)))
else:
starts.append(ends[i]+gap)
if ori:
orientation.append(random.sample(m,1)[0])
else:
orientation.append(1)
if rep:
reps.append(random.sample(replist,1)[0])
else:
reps.append(1)
weave=zip(starts,ends,reps,orientation)
return [int(zipdat) for zipped in weave for zipdat in zipped]
def makereads(readnumber,readlength,\
meaninsert,stdinsert,filename,readmaker="metasim"):
if readmaker=="metasim":
statoscom=("/home/baker/Packages/metasim/MetaSim cmd -r {0} -m \
-g /home/baker/Packages/metasim/examples/errormodel-100bp.mconf \
-2 /home/baker/Packages/metasim/examples/errormodel-100bp.mconf \
--empirical-pe-probability {1} --clones-mean {2} --clones-param2 {3} \
{4}").format(readnumber,readlength,meaninsert,stdinsert,\
filename)
elif readmaker=="gemsim":
pass
os.system(statoscom)
def makereadswrap(readnumber,readlength,\
meaninsert,stdinsert,filename,readmaker="metasim"):
makereads(readnumber,readlength,\
meaninsert,stdinsert,filename,readmaker="metasim")
if readmaker=='metasim':
readname=filename.split(".fna")[0]+"-Empirical.fna"
print "This is the read name", readname
setreads(readname)
def setreads(filename,clean=True):
import os
tempname=filename.split(".fna")[0]+"redone.fna"
print tempname, "This is the temporary name"
os.rename(filename,tempname)
#Make a new file for writing output
with open(filename,'w') as make:
pass
with open(tempname) as reads:
seq=[]
Ind=False
with open(filename,'a+') as oldfile:
for line in reads:
if line.startswith('>'):
Ind=False
if seq!=[]:
oldfile.write("".join(seq)+"\n")
if clean:
oldfile.write(line.split(" ")[0]+"\n")
else:
oldfile.write(line)
seq=[]
else:
seq.append(line.translate(None,'\n'))
oldfile.write("".join(seq)+"\n")
seq=[]
os.remove(tempname)
return
def makesummary(cuts):
''' Takes the cuts used for genome slicing and makes
a summary file detailing changes'''
return
def scaffoldparse(scaffoldloc,scaffoldloc2,truegaps,trueorientations):
'''Given two scaffold locations of predefined structure extract information from it and compare'''
return
def Falsejoins(type1errors):
with open("ScafMFalseJoins.fasta",'a+') as Joins:
Joins.write("Mistake1,Mistake2\n")
for tup in type1errors:
ind=False
for tig in tup:
if ind==False:
Joins.write(str(tig)+",")
elif ind:
Joins.write(str(tig)+"\n")
ind=True
def trackdecisions(Truepos,Falsepos,falseneg,notigs=False,N_joins=False):
import os
if not os.path.isfile("../Results.txt"):
with open("../Results.txt",'w') as data:
data.write("{0},{1},{2}\n".format("TruePositive","FalsePositive","FalseNegative"))
with open("../Results.txt",'a+') as data:
data.write("{0},{1},{2}\n".format(len(Truepos),len(Falsepos),len(falseneg)))
return
def Visualise(scaffoldnames,gaps,contigloc,covplot=False):
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import pandas as pd
scaffoldMgap,scaffoldMTjoins,scaffoldMFjoins,scaffoldMFNeg=validcheck(contigloc=contigloc)
data={}
Falsejoins(scaffoldMFjoins)
trackdecisions(scaffoldMTjoins,scaffoldMFjoins,scaffoldMFNeg)
sortMgap=sorted(scaffoldMgap,key=lambda x: min(x[0]))
pairs=[(min(x[0])-1,x[1]) for x in sortMgap]
minscafind,gap2=zip(*pairs)
pairedgaps=[gap for i,gap in enumerate(gaps) if i in minscafind]
for scaffold in scaffoldnames:
data[scaffold]=[contiglen(scaffold)]
data[scaffold]+=[[NXcalc(X*0.1,data[scaffold][0]) for X in range(1,11)]]
data[scaffold]+=[[len(data[scaffold])-1]]
#print data
standardplot(gap2,pairedgaps,"The predicted gapsize(nt)","The actual gapsize(nt)","","ScaffoldMVTrueGap")
multiplot([[X*0.1 for X in range(1,11)] for i in range(0,len(scaffoldnames))],[data[scaffold][-2] \
for scaffold in scaffoldnames],"X","NX Value for the scaffold",\
"The NX metric for various scaffolds", ["Contigs","ScaffoldM","SSPACE"],"N50Metric_Scaffolds")
if covplot:
for scaffold in scaffoldnames:
plotcoverage(scaffold)
return
def contigmap(evidencefile,coveragefile='covs.tsv'):
Scaffolds={}
with open(evidencefile,'r+') as SSPACE:
for line in SSPACE:
if line.startswith(">"):
parts=line.split('|')
scaffold=parts[0].strip('>')
Scaffolds[scaffold]=[]
else:
parts=line.split('|')[0] #tig name - in form f_tign
if parts!='\n':
if parts.startswith('r'):
parts='f'+parts[1:]
Scaffolds[scaffold]+=[parts]
with open(coveragefile,'r+') as covs:
covs.readline() #Move paste header
orderedtigs=[]
for line in covs:
orderedtigs.append(line.split('\t')[0]) #Contig name
N_tigs=len(orderedtigs) #Number of contigs
#map f_tigi to orderedtigs[i] in dictionary
Swapdict={"{0}{1}".format('f_tig',i):orderedtigs[i-1] for i in range(1,N_tigs+1)}
Mapped={scaffold:[Swapdict[contig] for contig in contigs] for scaffold,contigs in Scaffolds.iteritems()}
print Mapped
return Mapped
def scaffoldtoedges(mapped):
SS_data=np.zeros((1,3))
for i,(scaffold,contigs) in enumerate(mapped.iteritems()):
for j,contig in enumerate(contigs):
if j<len(contigs)-1:
SS_data=np.vstack((SS_data,np.array([contig,'(0,1)',contigs[j+1]])))
SS_data=SS_data[1:,:] #Remove initial dummy row
np.savetxt('SSPACE_Edges.txt',SS_data,fmt='%s',delimiter='\t',newline='\n', header='Edge1\trel\tEdge2\n')
return SS_data
def graphtosif(graph,graphname,removed=False):
#print graphname, "This is the supposed graph being parsed"
#print "\n",graph
done=set([])
with tryopen("{0}{1}".format(graphname,"_links"),"Contig1\tRelationship\tContig2\n",".txt",True) as network2:
for contig1,connected in graph.iteritems():
for contig2 in connected:
if (contig1,contig2) not in done and (contig2,contig1) not in done:
network2.write("{0}\t{1}\t{3}\n".format())
done|=set([(contig1,contig2)]) #Add current pair
done|=set([(contig1,contig2)[::-1]]) #Reverse of current pair
with tryopen("{0}{1}".format(graphname,"_contigs"),"Contig\n",".txt") as contigs:
for contig in graph:
contigs.write("{0}".format(contig))
return
def addcol(filename,column_s,header,d='\t'):
'''Takes a text file containing tab separated columns and adds tab-separated columns
to the end. This is primarily for updating the .txt files used in Cytoscape with additional
info such as bin allocation etc. Loads whole file into memory.'''
with tryopen(filename,'','') as oldfile:
olf=oldfile.readlines()
Newfile=[]
for i,line in enumerate(olf):
if i==0:
processedline=[x.rstrip('\n') for x in line.split('\t')]
processedline+=[head for head in header]
processedline[-1]=processedline[-1]+"\n"
Newfile+=processedline
else:
processedline=[x.rstrip('\n') for x in line.split('\t')]
processedline+=[column[i] for column in column_s]
processedline[-1]=processedline[-1]+"\n" #Add endline
            Newfile+=[processedline]
with tryopen(filename,'','',True) as newfile:
for line in Newfile:
newfile.write(("{0}".format(d)).join(line))
return
def sspaceconvert(evidencefile,coveragefile='covs.tsv'):
    Mapped=scaffoldtoedges(contigmap(evidencefile,coveragefile=coveragefile))
return Mapped
def plotcoverage(name):
return
#Simply comparison - one scaffolder and preprocessed dataset
def standardplot(x,y,xname,yname,title,saveloc,log=False):
import matplotlib.pyplot as plt
plt.gca().set_color_cycle(['blue', 'black'])
plt.plot(x,y,'o')
plt.plot(y,y,'-')
plt.xlabel(xname)
plt.ylabel(yname)
plt.title(title)
plt.savefig('./graphs/'+saveloc+'.png',bbox_inches='tight')
plt.close()
return
#More complicated comparisions - Likely between multiple scaffolders
def multiplot(x,y,xname,yname,title,legend,saveloc,log=False):
'''For plotting lists of lists for x and y, along with an appropiate legend'''
import matplotlib.pyplot as plt
plt.gca().set_color_cycle(['red', 'blue', 'black'])
print x, "This is the x variable"
print y, "This is the y variable"
for i,xdat in enumerate(x):
plt.plot(x[i],y[i])
plt.xlabel(xname)
plt.ylabel(yname)
plt.title(title)
plt.xticks([0.1*X for X in range(0,11)])
plt.axis([min(xv for xval in x for xv in xval),max(xv for xval in x for xv in xval),0,1.05*max(yv for yval in y for yv in yval)])
leg=plt.legend(legend, loc='upper right',title='Scaffolder')
leg.get_frame().set_alpha(0)
plt.savefig('./graphs/'+saveloc+'.png',bbox_inches='tight')
plt.close()
def multibar(x,y,xlab,ylab,title,saveloc,legend):
#Sourced from :http://matplotlib.org/examples/api/barchart_demo.html
#To be modified heavily later
import numpy as np
import matplotlib.pyplot as plt
N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)
womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, womenMeans, width, color='y', yerr=womenStd)
# add some text for labels, title and axes ticks
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('G1', 'G2', 'G3', 'G4', 'G5') )
ax.legend( (rects1[0], rects2[0]), ('Men', 'Women') )
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
plt.show()
def validcheck(gapdataloc="Gapdata.txt",contigloc='MG1655refslices.fna'):
'''Uses Gapdata.txt to compare observed scaffolds to known scaffold'''
validgaps=[]
truejoins=[]
falsejoins=[]
falsenegs=[]
truepairs=[(i,i+1) for i in range(1,len(contiglen(contigloc)))]
with open(gapdataloc) as gaps:
gaps.readline() #Move past header
for line in gaps:
compsplit=line.split(",")
tig1=compsplit[0].rstrip("|").split("|")[-1]
tig2=compsplit[1].rstrip("|").split("|")[-1]
#print compsplit[-1]
gap=int(compsplit[-1])
tig1ind=int(tig1.split('g')[-1])
tig2ind=int(tig2.split('g')[-1])
if abs(tig1ind-tig2ind)==1:
validgaps.append(((tig1ind,tig2ind),gap))
truejoins.append((tig1,tig2))
else:
falsejoins.append((tig1,tig2))
print "THESE ARE THE FALSENEGATIVES"
falsenegs=[x for x in truepairs if x not in zip(*validgaps)[0]]
print falsenegs, "The Reject|True"
return [validgaps,truejoins,falsejoins,falsenegs]
def NXcalc(X,tiglengths):
tot=sum(tiglengths)
S_tig=sorted(tiglengths)
N50=0
runtot=0
if X==0:
return S_tig[-1]
for i in range(len(S_tig)):
N50=S_tig[-(i+1)]
runtot+=N50
if runtot>tot*X:
return N50
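#Worked example with hypothetical lengths: NXcalc(0.5,[10,20,30,40]) sums the sorted
#lengths from largest to smallest (40, then 30) until the running total passes 50% of
#100, so it returns 30 as the N50. If the threshold is never passed it returns None.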
def parsetsv(filename='links.tsv',delim=',',header=False):
parsed=[]
with open(filename) as tsv:
if header==False:
tsv.readline()
for line in tsv:
parsed.append(line.split(delim))
return parsed
def getlinks(contig1,contig2,filename='links.tsv'):
links=parsetsv(filename,delim='\t')
flags=[]
for link in links[1:]:
tig1=0
tig2=0
for col in link:
#Need to change for simple contig names since I stripped the full name earlier
if contig1 in col:
tig1=1
if contig2 in col:
tig2=1
if tig1+tig2==2:
flags.append(link)
#print flags
return flags
def linkdist(onelink):
'''distance from relevant edge for each contig in the link'''
orientation1=int(onelink[4])
orientation2=int(onelink[7])
if orientation1==1:
dist1=int(onelink[3])
else:
dist1=int(onelink[2])-int(onelink[3])
if orientation2==1:
dist2=int(onelink[6])
else:
dist2=int(onelink[5])-int(onelink[6])
#Returns tuple of distances and contig names
return ((dist1,onelink[0]),(dist2,onelink[1]))
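#linkdist appears to assume the links.tsv column layout used elsewhere in this file:
#columns 2-4 hold contig1's length, read position and orientation, and columns 5-7
#hold the same for contig2, so each distance is measured from the relevant contig edge.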
def extracttigs(filename='ScafMFalseJoins.fasta',outfile='mislinkseq.fasta',contigloc='MG1655refslices.fna'):
missjoins=parsetsv(filename,delim=',',header=False)
missjoins=[[y.rstrip('\n') for y in x] for x in missjoins]
print "THe faulty contig pair",missjoins
print "You are right before the loop"
for line in missjoins:
#print line
#print line[0],
tigs=getlinks(line[0],line[1]) #Extract missjoins
#print "This is the links",tigs
orientation=(tigs[0][4],tigs[0][7]) #Assuming only one orientation present - risky
#print "This is the orientation",orientation
#Fix this assumption later
dists=[linkdist(x) for x in tigs] #Get links distances
#print "This is the distance",dists
#Works with assumption that joins are always in same order - they are
maximum=[max(x) for x in zip(*dists)] #Unzips tuple into list for each contig
#print "This is the maximum",maximum
#GEts maximum distance from edge
W_faultylink(maximum[0][0],maximum[1][0],maximum[0][1],maximum[1][1],orientation,outfile,contigloc)
def W_faultylink(distance1,distance2,contig1,contig2,orientation,filename='mislinkseq.fasta',contigloc='MG1655refslices.fna'):
import os
import sys
print "Did I make it this far"
try:
if not os.path.isfile("../{0}".format(filename)):
with open("../{0}".format(filename),'w') as test:
pass
with open("../{0}".format(filename),'a+') as mislink:
mislink.write(">{0}|Distance:{1}bp_from_edge\n".format(contig1,distance1))
for chunk in chunker(cut(extractcontigs(contig1,contigloc,header=False).translate(None,'\n'),distance1,orientation,0),70,'\n'):
mislink.write(chunk)
mislink.write('\n')
mislink.write(">{0}|Distance:{1}bp_from_edge\n".format(contig2,distance2))
for chunk in chunker(cut(extractcontigs(contig2,contigloc,header=False).translate(None,'\n'),distance2,orientation,1),70,'\n'):
mislink.write(chunk)
mislink.write('\n')
except:
print "Errors opening file or running stuff"
raise ValueError
def cut(seq,slicesize,orientation,tigpairno):
or1=int(orientation[0])
or2=int(orientation[1])
if tigpairno==0:
if or1==1:
return seq[:slicesize]
else:
return seq[-slicesize:]
elif tigpairno==1:
if or2==1:
return seq[:slicesize]
else:
return seq[-slicesize:]
def extractcontigs(contigname,contigloc,header=True):
    '''Reads the contigs file given by contigloc and extracts the text
    for the named contig. Temporary - just for use when making scaffolds.'''
import sys
try:
with open(contigloc,'r') as Contigs:
head=Contigs.readline()
if not head.startswith('>'):
raise TypeError("Not a FASTA file:")
Contigs.seek(0)
title=head[1:].rstrip() ##Strips whitespace and >
record=0
contigseq=[]
for line in Contigs:
if line.startswith('>') and line.find(contigname)>=0:
record=1
if header:
contigseq.append(line)
elif line.startswith('>') and contigname not in line:
record=0
elif record==1:
contigseq.append(line)
else:
pass
seq=''.join(contigseq)
return seq
except:
print "Error opening file:", contigloc,sys.exc_info()[0]
raise
def contiglen(contigloc):
    '''Just goes through a multi-fasta file and works out the sequence length for each entry'''
import sys
try:
lengths=[]
with open(contigloc,'r+') as Contigs:
head=Contigs.readline()
if not head.startswith('>'):
raise TypeError("Not a FASTA file:")
Contigs.seek(0)
contigseq=[]
for line in Contigs:
if line.startswith('>'):
if contigseq!=[]:
lengths.append(len("".join(contigseq)))
contigseq=[]
else:
contigseq.append(line.translate(None,"\n"))
lengths.append(len("".join(contigseq)))
return lengths
except:
print "Error opening file:", contigloc,sys.exc_info()[0]
raise
def writeout(data):
return
def postprocess(sifloc,trueloc,final=False):
with tryopen(trueloc,'','.txt') as correct:
correct.readline() #Move past header
Truescaf={}
for line in correct.readlines():
curline=line.split('\t')
if curline[0] not in Truescaf:
Truescaf[curline[0]]=[]
if curline[1].rstrip('\n') not in Truescaf:
Truescaf[curline[1].rstrip('\n')]=[]
if curline[1].rstrip('\n') not in Truescaf[curline[0]]:
Truescaf[curline[0]]+=[curline[1].rstrip('\n')]
if curline[0] not in Truescaf[curline[1].rstrip('\n')]:
Truescaf[curline[1].rstrip('\n')]+=[curline[0]]
Newdata=[]
with tryopen(sifloc,'','.txt') as olddata:
if not final:
header=olddata.readline().translate(None,'\n')+"\tTrueEdge\tDecision\n"
else:
header=olddata.readline().translate(None,'\n')+"\tTrueEdge\n"
for line in olddata.readlines():
curline=line.split('\t')
tig1=curline[0]
tig2=curline[2]
if tig1 in Truescaf:
if tig2 in Truescaf[tig1]:
TrueEdge="True"
else:
TrueEdge="False"
else:
TrueEdge="False"
if not final:
remove=curline[4].rstrip('\n')
#print remove
if remove=="True" and TrueEdge=="True":
Decision="FalseNeg"
elif remove=="True" and TrueEdge=="False":
Decision="TrueNeg"
elif remove=="False" and TrueEdge=="True":
Decision="TruePos"
elif remove=="False" and TrueEdge=="False":
Decision="FalsePos"
Newdata+=[[x.rstrip('\n') for x in curline]+[TrueEdge]+[Decision]]
else:
Newdata+=[[x.rstrip('\n') for x in curline]+[TrueEdge]]
with tryopen(sifloc,header,'.txt',True) as final:
for line in Newdata:
final.write("{0}\n".format("\t".join(line)))
return
def totprocess(sifloc1,sifloc2,sifloc3,trueloc="TrueEdges"):
postprocess(sifloc1,trueloc,False)
postprocess(sifloc2,trueloc,False)
postprocess(sifloc3,trueloc,True)
return
def maketrueedges():
    '''Assumes that the contigslices file is ordered both by position within each species
    and by species'''
TrueEdges=[]
with tryopen("covs",'','.tsv') as covs:
i=1
prevtig=False
curtig=False
donetigs=[]
TrueEdge=[]
covs.readline() #Move past header
for line in covs:
prevtig=curtig
curtig=line.split('\t')[0]
#print "This is the current Contig", curtig
#print "This is the current line", line.split('\t')[0]
tignumber='{0}{1}'.format('tig',i)
if tignumber in curtig:
i+=1
if tignumber in donetigs:
donetigs=[]
i=2
elif prevtig!=False:
TrueEdge+=[(prevtig,curtig)]
donetigs+=[tignumber]
else:
i=2
donetigs=[]
#print tignumber
#print curtig
with tryopen("TrueEdges",'contig1\tcontig2\n','.txt',True) as Edge:
for edge in TrueEdge:
#print edge
Edge.write("{0}\t{1}\n".format(edge[0],edge[1]))
return
def binstotxt(checkdir,fileend='.fa'):
import os
files=os.listdir(checkdir)
    bins=[File for File in files if File.endswith(fileend)] #Keep only files ending with fileend (default '.fa')
binpaths=[os.path.join(checkdir,binfile) for binfile in bins]
contigbinmap={}
for i,binloc in enumerate(binpaths):
        contigbinmap[bins[i][:-len(fileend)]]=getfastaheaders(binloc) #Maps the set of contigs in
#bin file to that bin in a graph
return contigbinmap
def writelis(filename,header,rows):
with tryopen(filename,header,'') as newfile:
for row in rows:
newfile.write("\t".join(row)+"\n")
return
def binwrapper(writedir,filename,checkdir,fileend='.fa'):
import os
bins=binstotxt(checkdir,fileend)
rows=[(item,key) for key,items in bins.iteritems() for item in items]
columns=zip(rows)
writelis(os.path.join(writedir,filename),"Contig\tBin\n",rows)
def getfastaheaders(filename):
try:
Names=[]
with tryopen(filename,'','') as fasta:
for line in fasta:
if line.startswith('>'):
#print line
Names+=[line.strip('>').rstrip('\n')]
else:
pass
return Names
except:
raise
def screenbins(binsfile,contamlevel, completeness):
return
def tryopen(filename,header,filetype,expunge=False):
    '''Looks for a file; if it's not there, it creates the file and writes the given header.
    Returns the opened file either way. Remember to close the file if you call this
    function, or use: with tryopen(stuff) as morestuff.'''
import os
try:
if not os.path.isfile(filename+filetype):
with open(filename+filetype,'w') as newfile:
newfile.write(header)
elif os.path.isfile(filename+filetype):
if expunge:
temp=open(filename+filetype,'w+')
temp.write(header)
temp.close()
return open(filename+filetype,'a+')
return open(filename+filetype,'a+')
except:
print "Either could not create or open the file"
raise
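#Example usage (hypothetical file name): append a row to results.txt, creating it
#with a header the first time:
#with tryopen("results","Contig\tBin\n",".txt") as out:
#    out.write("tig1\tbin_1\n")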
def makeboolean(string):
Val_T=("true",'t','1','yes')
Val_F=("false","f",'0','no')
try:
if isinstance(string,bool):
return string
if string.lower() in Val_T:
return True
elif string.lower() in Val_F:
return False
except:
raise TypeError("This does not appear to even be an attempt at a boolean")
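#e.g. makeboolean("Yes") -> True, makeboolean("0") -> False; booleans pass through unchanged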
if __name__ == "__main__": ###Check if arguments coming in from command line
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import os
import sys
    import datetime
    import argparse #used below; assumed not already imported at module level
parser = argparse.ArgumentParser(description='Takes a Genome in Fasta format and then parse it into predefined chunks\
These reads and gaps are then parsed to both SSPace and ScaffoldM which then have there output extracted and compared.\
These comparisons have formed the basis for improvements on ScaffoldM.')
parser.add_argument('-N','--name', type=str, nargs='?', \
help='The name of the file',default='/home/baker/Documents/Geneslab/TestFunctions/ReferenceFasta/MG1655ref.fasta')
parser.add_argument('-P','--path', type=str, nargs='?', \
default='/home/baker/Packages/SSPACE-STANDARD-3.0/SSPACE_Standard_v3.0.pl', \
help='The name of absolute path')
    parser.add_argument('-L','--lists', type=int, nargs='*', help='The list of cuts to be made to the reference genome',default=False)
parser.add_argument('-l','--readlength',type=int,nargs='?',
help='The length of reads in the simulated library',default=100)
parser.add_argument('-c','--coverage',type=int,nargs='?',
help='Coverage in simulated library',default=30)
parser.add_argument('-b','--bams',type=str,nargs='*',
                        help='Names of BAM files to use',default="NA")
parser.add_argument('-m','--meaninsert',type=int,nargs='?',
help='The mean insert size of the library, this is the expected gap between two paired reads',default=300)
parser.add_argument('-s','--stdinsert',type=int,nargs='?',
help='The standard deviation of the insert size between paired reads',default=30)
parser.add_argument('-g','--gap',type=int,nargs='?',
help='The gap between contigs',default=50)
parser.add_argument('-lim','--linklimit',type=int,nargs='?',
help='The number of linking reads needed to link contigs',default=5)
parser.add_argument('-r','--ratio',type=int,nargs='?',
help='If one contig is linked to multiple others a comparison is made between the number of linking reads. \
this ratio is the value at which both links will be rejected to avoid false positives',default=0.7)
parser.add_argument('-ln','--libno',type=int,nargs='?',
help='The number of libraries expected to be found in a bamm file',default=[1])
parser.add_argument('-ns','--nslice',type=int,nargs='?',
help='The number of slices to use for data simulation',default=100)
parser.add_argument('-t','--trim',type=int,nargs='?',
help='Whether or not',default=0.2)
parser.add_argument('-e','--error',type=int,nargs='?',
                        help='Error tolerance for SSPACE read inserts',default=0.75)
parser.add_argument('-w','--wrapperp',type=str,nargs='?',
help='The path to ScaffoldM wrapper',default="~/Documents/Geneslab/ScaffoldM/scaffoldm/")
parser.add_argument('-si','--sim',type=str,nargs='?',
help='Whether or not to simulate',default=True)
parser.add_argument('-C','--contiglocation',type=str,nargs='?',
help='The path to contigs',default="")
parser.add_argument('-O','--randominversions',type=bool,nargs='?',
                        help='Whether or not to randomly take the reverse complement',default=False)
parser.add_argument('-rep','--rep',type=bool,nargs='?',
help='Boolean- whether to repeat sequences',default=False)
parser.add_argument('-Tig','--tigname',type=str,nargs='?',
help='Name of Fasta file for mapping',default="mergedslices.fasta")
parser.add_argument('-Rsim','--simreads',type=str,nargs='?',
                        help='Whether or not to simulate reads',default=False)
args = parser.parse_args()
###Stuff for Simulation
sim=makeboolean(args.sim) #Whether or not to simulate
path=args.path #path to sspace
Name=args.name #Path to reference genome
contigloc=args.contiglocation #path to contigs
prename=os.sep.join(Name.split(os.sep)[:-1]) #Strips last layer of path
if len(Name.split(os.sep)[-1].split(".fasta"))>1:
postname=Name.split(os.sep)[-1].split(".fasta")[0] #Should be name of reference file - path and file type
elif len(Name.split(os.sep)[-1].split(".fna"))>1:
postname=Name.split(os.sep)[-1].split(".fna")[0]
refpath="../{0}".format(Name.split(os.sep)[-2])
newname="../{0}/{1}".format(Name.split(os.sep)[-2],Name.split(os.sep)[-1].split(".fasta")[0]) #For moving up and into reference folder
coverage=args.coverage # A specified amount of coverage for simulation
readlength=args.readlength #Simulated read length
meaninsert=args.meaninsert #Mean insert size
stdinsert=args.stdinsert #Std deviation in insert size
    lists=args.lists #The list of cuts to be made to the reference genome
    linklimit=args.linklimit #The lower limit to accept a pairing - eg ignore all pairs with less than k links
ratio=args.ratio #Ratio for SSPACE algorithm
gap=args.gap #Mean value for simulated gap size
    libno=args.libno #The number of libraries BAMM should search for amongst the reads
    N_slices=args.nslice #The number of slices to make
error=args.error #The error for SSPACE to accept read inserts
trim=args.trim #Can't remember what this does
    ori=args.randominversions #Whether or not to randomly take the reverse complement
rep=args.rep #Whether or not to randomly repeat sequence in the simulation
wrapperpath=args.wrapperp #The path to the python wrapper
seqlen=getfastalen(Name) #Approximate length of the genome
bams=args.bams #name of bams
contigname=args.tigname #name of fasta file
simreads=makeboolean(args.simreads)
#STuff for all comparisons
if sim:
if trim==True:
#Trim overall length randomly for more variability
pass
#Turn long list of cut locations into a string suitable for os.system
if lists==False:
slices=randcuts(gap,seqlen,N_slices,ori=ori)
#print slices
slices=[str(i) for i in slices]
readcuts=" ".join(slices)
else:
slices=lists
readcuts=" ".join(lists)
start=min([int(j) for i,j in enumerate(slices) if i%4==0]) #Starting position of cuts
        end=max([int(j) for i,j in enumerate(slices) if i%4==1]) #Ending position of cuts
os.mkdir(postname+"cuts_S:{0}_E:{1}".format(start,end))
os.chdir(postname+"cuts_S:{0}_E:{1}".format(start,end))
slicename,completename=slicer(slices,Name)
#Make reads via metasim
if simreads:
readnumber=coverage*(end-start)/readlength
makereadswrap(readnumber,readlength,meaninsert,stdinsert,\
completename)
#splits the reads into a format suitable for
completename=completename.split(".fna")[0]
file1,file2=splitter(completename+"-Empirical")
else:
#print os.getcwd()
file1='{0}/{1}-Empirical_1'.format(refpath,postname)
file2='{0}/{1}-Empirical_2'.format(refpath,postname)
completename=newname
contigloc=slicename
#Make libraries.txts
makelibrary("library",['Lib1'], [file1],[file2],[meaninsert],[error],orientation=['FR'])
else:
contigloc=contigname
pass
#To separate from SIM - needs a library file
#perl SSPACE_Basic.pl -l libraries.txt -s contigs.fasta -x 0 -m 32 -o 20 -t 0 -k 5 -a 0.70 -n 15 -p 0 -v 0 -z 0 -g 0 -T 1 -b standard_out
mapreads=False
if mapreads:
print "SSPACE", contigloc, "The contig file"
print "perl {4} -l {0} -s {1} -x 0 \
-k {2} -a {3} -b standard_out"\
.format("library.txt",contigloc,linklimit,ratio,path)
os.system("perl {4} -l {0} -s {1} -x 0 \
-k {2} -a {3} -b standard_out"\
.format("library.txt",contigloc,linklimit,ratio,path))
print "Onto BamM"
if sim:
os.system("bamm make -d {0} -i {1} --quiet".format(slicename,completename+"-Empirical.fna"))
libno=[str(ele) for ele in libno]
librarynumbers=' '.join(libno)
bamname="{0}{1}{2}".format(slicename.split(".fna")[0],".",postname)+"-Empirical"
contigname=slicename
print "The current time: ", datetime.datetime.now().time().isoformat()
if type(bams)!=str: #Check if default is being used
libno=[str(ele) for ele in libno]
librarynumbers=' '.join(libno)
print "The Bams", ' '.join(bams)
os.system("python {0}wrapper.py -b {1} -f {2} -n {3}".format(wrapperpath,' '.join(bams),contigname,librarynumbers))
real=True
if not real:
maketrueedges()
totprocess("Initial_links","Threshold_links","Cov_Links_links")
#print os.getcwd()
if os.path.isdir('./standard_out'): #Only go if SSPACE worked
SSPACEgraph=sspaceconvert("./standard_out/standard_out.final.evidence")
graphtosif(SSPACEgraph,"SSPACE_CONNECTIONS")
#os.procces() - make the graphs to compare SSPACE and ScaffoldM
os.system("python ./process.py")
binned=True
if binned:
binwrapper('.','Node_BinClass.txt','bins_raw/')
print "You made it pass checking the nodes"
#Separate those with high enough completeness/quality scores
#Visualise the remaining
#Work out how to colour based on this in cytoscape
#Bam, done, can compare into and out of binning occurrences
print "This is the real end now"
elif sim: #Should only occur on defaults
os.system("python {0}wrapper.py -b {1} -f {2} -n {3}".format(wrapperpath,bamname,contigname,librarynumbers))
maketrueedges()
totprocess("Initial_links","Threshold_links","Cov_Links_links")
print "This is the real end now"
comparisons=False
if comparisons:
#Make some comparisons between SSPACE and ScaffoldM
os.mkdir('graphs')
Visualise([slicename,"testScaffold.fasta","./standard_out/standard_out.final.scaffolds.fasta"],\
[int(slices[i+4])-int(slices[i+1]) for i in range(0,len(slices)-4,4)],contigloc)
if sim:
print "You made it to the link error extractions"
extracttigs(contigloc=slicename)
else:
pass
| gpl-3.0 |
rseubert/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it lies in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
tosolveit/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
mattcaldwell/zipline | tests/risk/answer_key.py | 39 | 11989 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import os
import numpy as np
import pandas as pd
import pytz
import xlrd
import requests
from six.moves import map
def col_letter_to_index(col_letter):
# Only supports single letter,
# but answer key doesn't need multi-letter, yet.
index = 0
for i, char in enumerate(reversed(col_letter)):
index += ((ord(char) - 65) + 1) * pow(26, i)
return index
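# e.g. col_letter_to_index('A') == 1 and col_letter_to_index('H') == 8,
# matching the 1-based column numbering used in the spreadsheet GUI.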
DIR = os.path.dirname(os.path.realpath(__file__))
ANSWER_KEY_CHECKSUMS_PATH = os.path.join(DIR, 'risk-answer-key-checksums')
ANSWER_KEY_CHECKSUMS = open(ANSWER_KEY_CHECKSUMS_PATH, 'r').read().splitlines()
ANSWER_KEY_FILENAME = 'risk-answer-key.xlsx'
ANSWER_KEY_PATH = os.path.join(DIR, ANSWER_KEY_FILENAME)
ANSWER_KEY_BUCKET_NAME = 'zipline-test_data'
ANSWER_KEY_DL_TEMPLATE = """
https://s3.amazonaws.com/zipline-test-data/risk/{md5}/risk-answer-key.xlsx
""".strip()
LATEST_ANSWER_KEY_URL = ANSWER_KEY_DL_TEMPLATE.format(
md5=ANSWER_KEY_CHECKSUMS[-1])
def answer_key_signature():
with open(ANSWER_KEY_PATH, 'rb') as f:
md5 = hashlib.md5()
buf = f.read(1024)
md5.update(buf)
while buf != b"":
buf = f.read(1024)
md5.update(buf)
return md5.hexdigest()
def ensure_latest_answer_key():
"""
    Get the latest answer key from a publicly available location.
Logic for determining what and when to download is as such:
    - If there is no local spreadsheet file, then get the latest answer key,
as defined by the last row in the checksum file.
- If there is a local spreadsheet file:
-- If the spreadsheet's checksum is in the checksum file:
    --- If the spreadsheet's checksum does not match the latest, then grab
    the latest checksum and replace the local checksum file.
--- If the spreadsheet's checksum matches the latest, then skip download,
and use the local spreadsheet as a cached copy.
-- If the spreadsheet's checksum is not in the checksum file, then leave
the local file alone, assuming that the local xls's md5 is not in the list
due to local modifications during development.
    It is possible that md5s could collide; if that is ever the case, we should
then find an alternative naming scheme.
The spreadsheet answer sheet is not kept in SCM, as every edit would
increase the repo size by the file size, since it is treated as a binary.
"""
answer_key_dl_checksum = None
local_answer_key_exists = os.path.exists(ANSWER_KEY_PATH)
if local_answer_key_exists:
local_hash = answer_key_signature()
if local_hash in ANSWER_KEY_CHECKSUMS:
# Assume previously downloaded version.
# Check for latest.
if local_hash != ANSWER_KEY_CHECKSUMS[-1]:
# More recent checksum, download
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
else:
# Assume local copy that is being developed on
answer_key_dl_checksum = None
else:
answer_key_dl_checksum = ANSWER_KEY_CHECKSUMS[-1]
if answer_key_dl_checksum:
res = requests.get(
ANSWER_KEY_DL_TEMPLATE.format(md5=answer_key_dl_checksum))
with open(ANSWER_KEY_PATH, 'wb') as f:
f.write(res.content)
# Get latest answer key on load.
ensure_latest_answer_key()
class DataIndex(object):
"""
Coordinates for the spreadsheet, using the values as seen in the notebook.
The python-excel libraries use 0 index, while the spreadsheet in a GUI
uses a 1 index.
"""
def __init__(self, sheet_name, col, row_start, row_end,
value_type='float'):
self.sheet_name = sheet_name
self.col = col
self.row_start = row_start
self.row_end = row_end
self.value_type = value_type
@property
def col_index(self):
return col_letter_to_index(self.col) - 1
@property
def row_start_index(self):
return self.row_start - 1
@property
def row_end_index(self):
return self.row_end - 1
def __str__(self):
return "'{sheet_name}'!{col}{row_start}:{col}{row_end}".format(
sheet_name=self.sheet_name,
col=self.col,
row_start=self.row_start,
row_end=self.row_end
)
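# For instance, DataIndex('s_p', 'H', 4, 254) prints as 's_p'!H4:H254, the
# benchmark returns range referenced in the INDEXES table below.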
class AnswerKey(object):
INDEXES = {
'RETURNS': DataIndex('Sim Period', 'D', 4, 255),
'BENCHMARK': {
'Dates': DataIndex('s_p', 'A', 4, 254, value_type='date'),
'Returns': DataIndex('s_p', 'H', 4, 254)
},
# Below matches the inconsistent capitalization in spreadsheet
'BENCHMARK_PERIOD_RETURNS': {
'Monthly': DataIndex('s_p', 'R', 8, 19),
'3-Month': DataIndex('s_p', 'S', 10, 19),
'6-month': DataIndex('s_p', 'T', 13, 19),
'year': DataIndex('s_p', 'U', 19, 19),
},
'BENCHMARK_PERIOD_VOLATILITY': {
'Monthly': DataIndex('s_p', 'V', 8, 19),
'3-Month': DataIndex('s_p', 'W', 10, 19),
'6-month': DataIndex('s_p', 'X', 13, 19),
'year': DataIndex('s_p', 'Y', 19, 19),
},
'ALGORITHM_PERIOD_RETURNS': {
'Monthly': DataIndex('Sim Period', 'Z', 23, 34),
'3-Month': DataIndex('Sim Period', 'AA', 25, 34),
'6-month': DataIndex('Sim Period', 'AB', 28, 34),
'year': DataIndex('Sim Period', 'AC', 34, 34),
},
'ALGORITHM_PERIOD_VOLATILITY': {
'Monthly': DataIndex('Sim Period', 'AH', 23, 34),
'3-Month': DataIndex('Sim Period', 'AI', 25, 34),
'6-month': DataIndex('Sim Period', 'AJ', 28, 34),
'year': DataIndex('Sim Period', 'AK', 34, 34),
},
'ALGORITHM_PERIOD_SHARPE': {
'Monthly': DataIndex('Sim Period', 'AL', 23, 34),
'3-Month': DataIndex('Sim Period', 'AM', 25, 34),
'6-month': DataIndex('Sim Period', 'AN', 28, 34),
'year': DataIndex('Sim Period', 'AO', 34, 34),
},
'ALGORITHM_PERIOD_BETA': {
'Monthly': DataIndex('Sim Period', 'AP', 23, 34),
'3-Month': DataIndex('Sim Period', 'AQ', 25, 34),
'6-month': DataIndex('Sim Period', 'AR', 28, 34),
'year': DataIndex('Sim Period', 'AS', 34, 34),
},
'ALGORITHM_PERIOD_ALPHA': {
'Monthly': DataIndex('Sim Period', 'AT', 23, 34),
'3-Month': DataIndex('Sim Period', 'AU', 25, 34),
'6-month': DataIndex('Sim Period', 'AV', 28, 34),
'year': DataIndex('Sim Period', 'AW', 34, 34),
},
'ALGORITHM_PERIOD_BENCHMARK_VARIANCE': {
'Monthly': DataIndex('Sim Period', 'BJ', 23, 34),
'3-Month': DataIndex('Sim Period', 'BK', 25, 34),
'6-month': DataIndex('Sim Period', 'BL', 28, 34),
'year': DataIndex('Sim Period', 'BM', 34, 34),
},
'ALGORITHM_PERIOD_COVARIANCE': {
'Monthly': DataIndex('Sim Period', 'BF', 23, 34),
'3-Month': DataIndex('Sim Period', 'BG', 25, 34),
'6-month': DataIndex('Sim Period', 'BH', 28, 34),
'year': DataIndex('Sim Period', 'BI', 34, 34),
},
'ALGORITHM_PERIOD_DOWNSIDE_RISK': {
'Monthly': DataIndex('Sim Period', 'BN', 23, 34),
'3-Month': DataIndex('Sim Period', 'BO', 25, 34),
'6-month': DataIndex('Sim Period', 'BP', 28, 34),
'year': DataIndex('Sim Period', 'BQ', 34, 34),
},
'ALGORITHM_PERIOD_SORTINO': {
'Monthly': DataIndex('Sim Period', 'BR', 23, 34),
'3-Month': DataIndex('Sim Period', 'BS', 25, 34),
'6-month': DataIndex('Sim Period', 'BT', 28, 34),
'year': DataIndex('Sim Period', 'BU', 34, 34),
},
'ALGORITHM_RETURN_VALUES': DataIndex(
'Sim Cumulative', 'D', 4, 254),
'ALGORITHM_CUMULATIVE_VOLATILITY': DataIndex(
'Sim Cumulative', 'P', 4, 254),
'ALGORITHM_CUMULATIVE_SHARPE': DataIndex(
'Sim Cumulative', 'R', 4, 254),
'CUMULATIVE_DOWNSIDE_RISK': DataIndex(
'Sim Cumulative', 'U', 4, 254),
'CUMULATIVE_SORTINO': DataIndex(
'Sim Cumulative', 'V', 4, 254),
'CUMULATIVE_INFORMATION': DataIndex(
'Sim Cumulative', 'AA', 4, 254),
'CUMULATIVE_BETA': DataIndex(
'Sim Cumulative', 'AD', 4, 254),
'CUMULATIVE_ALPHA': DataIndex(
'Sim Cumulative', 'AE', 4, 254),
'CUMULATIVE_MAX_DRAWDOWN': DataIndex(
'Sim Cumulative', 'AH', 4, 254),
}
def __init__(self):
self.workbook = xlrd.open_workbook(ANSWER_KEY_PATH)
self.sheets = {}
self.sheets['Sim Period'] = self.workbook.sheet_by_name('Sim Period')
self.sheets['Sim Cumulative'] = self.workbook.sheet_by_name(
'Sim Cumulative')
self.sheets['s_p'] = self.workbook.sheet_by_name('s_p')
for name, index in self.INDEXES.items():
if isinstance(index, dict):
subvalues = {}
for subkey, subindex in index.items():
subvalues[subkey] = self.get_values(subindex)
setattr(self, name, subvalues)
else:
setattr(self, name, self.get_values(index))
def parse_date_value(self, value):
return xlrd.xldate_as_tuple(value, 0)
def parse_float_value(self, value):
return value if value != '' else np.nan
def get_raw_values(self, data_index):
return self.sheets[data_index.sheet_name].col_values(
data_index.col_index,
data_index.row_start_index,
data_index.row_end_index + 1)
@property
def value_type_to_value_func(self):
return {
'float': self.parse_float_value,
'date': self.parse_date_value,
}
def get_values(self, data_index):
value_parser = self.value_type_to_value_func[data_index.value_type]
return [value for value in
map(value_parser, self.get_raw_values(data_index))]
ANSWER_KEY = AnswerKey()
BENCHMARK_DATES = ANSWER_KEY.BENCHMARK['Dates']
BENCHMARK_RETURNS = ANSWER_KEY.BENCHMARK['Returns']
DATES = [datetime.datetime(*x, tzinfo=pytz.UTC) for x in BENCHMARK_DATES]
BENCHMARK = pd.Series(dict(zip(DATES, BENCHMARK_RETURNS)))
ALGORITHM_RETURNS = pd.Series(
dict(zip(DATES, ANSWER_KEY.ALGORITHM_RETURN_VALUES)))
RETURNS_DATA = pd.DataFrame({'Benchmark Returns': BENCHMARK,
'Algorithm Returns': ALGORITHM_RETURNS})
RISK_CUMULATIVE = pd.DataFrame({
'volatility': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_VOLATILITY))),
'sharpe': pd.Series(dict(zip(
DATES, ANSWER_KEY.ALGORITHM_CUMULATIVE_SHARPE))),
'downside_risk': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_DOWNSIDE_RISK))),
'sortino': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_SORTINO))),
'information': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_INFORMATION))),
'alpha': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_ALPHA))),
'beta': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_BETA))),
'max_drawdown': pd.Series(dict(zip(
DATES, ANSWER_KEY.CUMULATIVE_MAX_DRAWDOWN))),
})
| apache-2.0 |
nest/nest-simulator | pynest/examples/csa_spatial_example.py | 14 | 4452 | # -*- coding: utf-8 -*-
#
# csa_spatial_example.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Using CSA with spatial populations
----------------------------------
This example shows a brute-force way of specifying connections between
NEST populations with spatial data using Connection Set Algebra instead of
the built-in connection routines.
Using the CSA requires NEST to be compiled with support for
libneurosim. For details, see [1]_.
See Also
~~~~~~~~
:doc:`csa_example`
References
~~~~~~~~~~
.. [1] Djurfeldt M, Davison AP and Eppler JM (2014). Efficient generation of
connectivity in neuronal networks from simulator-independent
descriptions. Front. Neuroinform.
https://doi.org/10.3389/fninf.2014.00043
"""
###############################################################################
# First, we import all necessary modules.
import nest
import matplotlib.pyplot as plt
###############################################################################
# Next, we check for the availability of the CSA Python module. If it does
# not import, we exit with an error message.
try:
import csa
haveCSA = True
except ImportError:
print("This example requires CSA to be installed in order to run.\n" +
"Please make sure you compiled NEST using\n" +
" -Dwith-libneurosim=[OFF|ON|</path/to/libneurosim>]\n" +
"and CSA and libneurosim are available.")
import sys
sys.exit(1)
###############################################################################
# We define a factory that returns a CSA-style geometry function for
# the given layer. The function returned will return for each CSA-index
# the position in space of the given neuron as a 2- or 3-element list.
#
# This function stores a copy of the neuron positions internally, entailing
# memory overhead.
def geometryFunction(population):
positions = nest.GetPosition(population)
def geometry_function(idx):
return positions[idx]
return geometry_function
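# The returned closure can be queried directly, e.g. g = geometryFunction(pop1); g(0)
# gives the position of the first neuron in the population as described above.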
###############################################################################
# We create two spatial populations that have 20x20 neurons of type
# ``iaf_psc_alpha``.
pop1 = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([20, 20]))
pop2 = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([20, 20]))
###############################################################################
# For each population, we create a CSA-style geometry function and a CSA metric
# based on them.
g1 = geometryFunction(pop1)
g2 = geometryFunction(pop2)
d = csa.euclidMetric2d(g1, g2)
###############################################################################
# The connection set `cg` describes a Gaussian connectivity profile with
# ``sigma = 0.2`` and cutoff at 0.5, and two values (10000.0 and 1.0) used as
# ``weight`` and ``delay``, respectively.
cg = csa.cset(csa.random * (csa.gaussian(0.2, 0.5) * d), 10000.0, 1.0)
###############################################################################
# We can now connect the populations using the ``Connect`` function
# with the ``conngen`` rule. It takes the IDs of pre- and postsynaptic
# neurons (``pop1`` and ``pop2``), the connection set (``cg``) and a
# dictionary that map the parameters weight and delay to positions in
# the value set associated with the connection set (``params_map``).
params_map = {"weight": 0, "delay": 1}
connspec = {"rule": "conngen", "cg": cg, "params_map": params_map}
nest.Connect(pop1, pop2, connspec)
###############################################################################
# Finally, we use the ``PlotTargets`` function to show all targets in `pop2`
# starting at the center neuron of `pop1`.
cntr = nest.FindCenterElement(pop1)
nest.PlotTargets(cntr, pop2)
plt.show()
| gpl-2.0 |
mgeplf/NeuroM | examples/plot_features.py | 4 | 7112 | #!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Plot a selection of features from a morphology population'''
from collections import defaultdict
from collections import namedtuple
import sys
import json
import argparse
import numpy as np
import neurom as nm
from neurom.view import common as view_utils
import scipy.stats as _st
from matplotlib.backends.backend_pdf import PdfPages
DISTS = {
'normal': lambda p, bins: _st.norm.pdf(bins, p['mu'], p['sigma']),
'uniform': lambda p, bins: _st.uniform.pdf(bins, p['min'], p['max'] - p['min']),
'constant': lambda p, bins: None
}
def bin_centers(bin_edges):
"""Return array of bin centers given an array of bin edges"""
return (bin_edges[1:] + bin_edges[:-1]) / 2.0
def bin_widths(bin_edges):
"""Return array of bin widths given an array of bin edges"""
return bin_edges[1:] - bin_edges[:-1]
def histo_entries(histo):
"""Calculate the number of entries in a histogram
This is the sum of bin height * bin width
"""
bw = bin_widths(histo[1])
return np.sum(histo[0] * bw)
def dist_points(bin_edges, d):
"""Return an array of values according to a distribution
Points are calculated at the center of each bin
"""
bc = bin_centers(bin_edges)
if d is not None:
d = DISTS[d['type']](d, bc)
return d, bc
def calc_limits(data, dist=None, padding=0.25):
"""Calculate a suitable range for a histogram
Returns:
tuple of (min, max)
"""
dmin = sys.float_info.max if dist is None else dist.get('min',
sys.float_info.max)
dmax = sys.float_info.min if dist is None else dist.get('max',
sys.float_info.min)
_min = min(min(data), dmin)
_max = max(max(data), dmax)
padding = padding * (_max - _min)
return _min - padding, _max + padding
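# For example, calc_limits([0, 10]) with no distribution and the default 0.25
# padding widens the data range by 2.5 on each side, returning (-2.5, 12.5).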
# Neurite types of interest
NEURITES_ = (nm.NeuriteType.axon,
nm.NeuriteType.apical_dendrite,
nm.NeuriteType.basal_dendrite,)
# Features of interest
FEATURES = ('segment_lengths',
'section_lengths',
'section_path_distances',
'section_radial_distances',
'trunk_origin_radii')
def load_neurite_features(filepath):
'''Unpack relevant data into megadict'''
stuff = defaultdict(lambda: defaultdict(list))
nrns = nm.load_neurons(filepath)
# unpack data into arrays
for nrn in nrns:
for t in NEURITES_:
for feat in FEATURES:
stuff[feat][str(t).split('.')[1]].extend(
nm.get(feat, nrn, neurite_type=t)
)
return stuff
Plot = namedtuple('Plot', 'fig, ax')
def parse_args():
'''Parse command line arguments'''
parser = argparse.ArgumentParser(
description='Morphology feature plotter',
epilog='Note: Makes plots of various features and superimposes\
input distributions. Plots are saved to PDF file.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('datapath',
help='Morphology data directory path')
parser.add_argument('--mtypeconfig',
required=True,
help='Get mtype JSON configuration file')
parser.add_argument('--output',
default='plots.pdf',
help='Output PDF file name')
return parser.parse_args()
def main(data_dir, mtype_file): # pylint: disable=too-many-locals
'''Run the stuff'''
# data structure to store results
stuff = load_neurite_features(data_dir)
sim_params = json.load(open(mtype_file))
# load histograms, distribution parameter sets and figures into arrays.
# To plot figures, do
# plots[i].fig.show()
# To modify an axis, do
# plots[i].ax.something()
_plots = []
for feat, d in stuff.items():
for typ, data in d.items():
dist = sim_params['components'][typ].get(feat, None)
print('Type = %s, Feature = %s, Distribution = %s' % (typ, feat, dist))
# if no data available, skip this feature
if not data:
print("No data found for feature %s (%s)" % (feat, typ))
continue
# print 'DATA', data
num_bins = 100
limits = calc_limits(data, dist)
bin_edges = np.linspace(limits[0], limits[1], num_bins + 1)
histo = np.histogram(data, bin_edges, normed=True)
print('PLOT LIMITS:', limits)
# print 'DATA:', data
# print 'BIN HEIGHT', histo[0]
plot = Plot(*view_utils.get_figure(new_fig=True, subplot=111))
plot.ax.set_xlim(*limits)
plot.ax.bar(histo[1][:-1], histo[0], width=bin_widths(histo[1]))
dp, bc = dist_points(histo[1], dist)
# print 'BIN CENTERS:', bc, len(bc)
if dp is not None:
# print 'DIST POINTS:', dp, len(dp)
plot.ax.plot(bc, dp, 'r*')
plot.ax.set_title('%s (%s)' % (feat, typ))
_plots.append(plot)
return _plots
if __name__ == '__main__':
args = parse_args()
print('MTYPE FILE:', args.mtypeconfig)
plots = main(args.datapath, args.mtypeconfig)
pp = PdfPages(args.output)
for p in plots:
pp.savefig(p.fig)
pp.close()
| bsd-3-clause |
ABoothInTheWild/baseball-research | 100sTracker.py | 1 | 2343 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 13:36:12 2018
@author: ABooth
"""
from scipy.stats import beta
import matplotlib.pyplot as plt
import os
import pandas as pd
import numpy as np
#read data
os.chdir('C:/Users/abooth/Documents/Python Scripts/PastPreds/mlbPlayoffOdds2018')
beta18Pre = pd.read_csv("mlb2018PreSeasonBetaEstimates.csv")
mlb18Results = pd.read_csv("mlb2018SeasonResults.csv")
teamAbbr = "MIA"
#Set seed, init plots, get beta parameters
np.random.seed(seed=12345)
fig, ax = plt.subplots(1, 1, figsize=(8, 6), dpi=80)
priorA = beta18Pre[beta18Pre.Team_Abbr == teamAbbr]["PriorAlpha"].values[0]
priorB = beta18Pre[beta18Pre.Team_Abbr == teamAbbr]["PriorBeta"].values[0]
teamWins = mlb18Results[mlb18Results.Team_Abbr == teamAbbr].iloc[0]["Wins_20180830"]
teamLosses = mlb18Results[mlb18Results.Team_Abbr == teamAbbr].iloc[0]["Losses_20180830"]
postA = teamWins + priorA
postB = teamLosses + priorB
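#Conjugate beta-binomial update: each win adds 1 to alpha and each loss adds 1 to beta,
#so the posterior combines the preseason prior with the games played so far.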
#Plot pdf
x = np.linspace(beta.ppf(0.001, postA, postB),
beta.ppf(0.999, postA, postB), 1000)
ax.plot(x, beta.pdf(x, postA, postB),
'r-', lw=5, alpha=0.6, label='beta pdf')
#Make plot pretty
ax.legend(loc='best', frameon=False)
ax.set_xlim([0.25, 0.75])
ax.set_ylim([0, 13])
plt.title(teamAbbr + ' 8/30/2018 WP% Beta Estimate')
plt.ylabel('Density')
plt.xlabel('Winning Percentage')
plt.grid(b=True, which='major', color='gray', linestyle='--', alpha= 0.3)
plt.show()
#fig.savefig(teamAbbr + "826PosteriorWP.png", bbox_inches='tight')
#where the magic happens
np.random.seed(seed=12345)
sample = beta.rvs(postA, postB, size=100000)
gamesLeft = 162 - teamWins - teamLosses
winEstimate = np.round(teamWins + sample*gamesLeft,0)
print(np.mean(winEstimate))
print(np.percentile(winEstimate, 2.5))
print(np.percentile(winEstimate, 97.5))
print(np.mean(sample))
print(np.percentile(sample, 2.5))
print(np.percentile(sample, 97.5))
wins100 = 100 - teamWins
losses100 = 100 - teamLosses
#print(wins100/np.mean(sample))
#print(wins100/np.percentile(sample, 2.5))
#print(wins100/np.percentile(sample, 97.5))
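#Projected number of additional games needed to reach 100 total losses: at a loss
#rate of (1 - WP%), games needed = (100 - current losses) / (1 - win rate), evaluated
#at the posterior mean and at the 2.5th/97.5th percentiles of the sampled WP%.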
print(losses100/(1-np.mean(sample)))
print(losses100/(1-np.percentile(sample, 2.5)))
print(losses100/(1-np.percentile(sample, 97.5)))
prob = len(winEstimate[winEstimate <= 62])/float(100000)
print(prob)
| gpl-3.0 |
amolkahat/pandas | pandas/tests/io/msgpack/test_obj.py | 22 | 2405 | # coding: utf-8
import pytest
from pandas.io.msgpack import packb, unpackb
class DecodeError(Exception):
pass
class TestObj(object):
def _arr_to_str(self, arr):
return ''.join(str(c) for c in arr)
def bad_complex_decoder(self, o):
raise DecodeError("Ooops!")
def _decode_complex(self, obj):
if b'__complex__' in obj:
return complex(obj[b'real'], obj[b'imag'])
return obj
def _encode_complex(self, obj):
if isinstance(obj, complex):
return {b'__complex__': True, b'real': 1, b'imag': 2}
return obj
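# Hook semantics exercised below: `default` is called by packb for objects it
# cannot serialize natively (here a complex number) and must return a packable
# substitute, while `object_hook` / `object_pairs_hook` are applied by unpackb
# to each decoded map so that substitute can be rebuilt into the original object.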
def test_encode_hook(self):
packed = packb([3, 1 + 2j], default=self._encode_complex)
unpacked = unpackb(packed, use_list=1)
assert unpacked[1] == {b'__complex__': True, b'real': 1, b'imag': 2}
def test_decode_hook(self):
packed = packb([3, {b'__complex__': True, b'real': 1, b'imag': 2}])
unpacked = unpackb(packed, object_hook=self._decode_complex,
use_list=1)
assert unpacked[1] == 1 + 2j
def test_decode_pairs_hook(self):
packed = packb([3, {1: 2, 3: 4}])
prod_sum = 1 * 2 + 3 * 4
unpacked = unpackb(
packed, object_pairs_hook=lambda l: sum(k * v for k, v in l),
use_list=1)
assert unpacked[1] == prod_sum
def test_only_one_obj_hook(self):
pytest.raises(TypeError, unpackb, b'', object_hook=lambda x: x,
object_pairs_hook=lambda x: x)
def test_bad_hook(self):
def f():
packed = packb([3, 1 + 2j], default=lambda o: o)
unpacked = unpackb(packed, use_list=1) # noqa
pytest.raises(TypeError, f)
def test_array_hook(self):
packed = packb([1, 2, 3])
unpacked = unpackb(packed, list_hook=self._arr_to_str, use_list=1)
assert unpacked == '123'
def test_an_exception_in_objecthook1(self):
def f():
packed = packb({1: {'__complex__': True, 'real': 1, 'imag': 2}})
unpackb(packed, object_hook=self.bad_complex_decoder)
pytest.raises(DecodeError, f)
def test_an_exception_in_objecthook2(self):
def f():
packed = packb({1: [{'__complex__': True, 'real': 1, 'imag': 2}]})
unpackb(packed, list_hook=self.bad_complex_decoder, use_list=1)
pytest.raises(DecodeError, f)
| bsd-3-clause |
stefano-martina/pyLaser | maxwell.py | 2 | 8252 | #!/usr/bin/python
# Copyright (C) 2015 Stefano Martina, Nicoletta Granchi
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
from mpl_toolkits.mplot3d import axes3d
import matplotlib.animation as ani
#constants
kMin = 1.
kMax = 1000.
kStep = 0.1
k = 5. #decay rate in laser cavity (beam trasmission) (>0)
g1Min = 1.
g1Max = 1000.
g1Step = 10.
g1 = 1. #decay rates of atomic polarization (>0)
g2Min = 1.
g2Max = 1000.
g2Step = 10.
g2 = 1. #decay rates for population inversion (>0)
lMin = 11. #pumping energy parameter (in R)
lMax = 100.
lStep = 0.01
lInt = 10.
l = lMin
singleStartPoint = [0.5, 0.5, 0.5]
radius = 0.0001
graphLimit = [[singleStartPoint[0]-radius, singleStartPoint[1]-radius, singleStartPoint[2]-radius],[singleStartPoint[0]+radius, singleStartPoint[1]+radius, singleStartPoint[2]+radius]]
viewLimit = [[-3., -3., -7.],[3., 3., 7.]]
#graphLimit = [[-0.5, -0.5, -0.5],[0.5, 0.5, 0.5]]
#viewLimit = [[-2, -2, -2],[2, 2, 2]]
#graphLimit = [[-2, -2, -2],[2, 2, 2]]
#viewLimit = [[-10, -10, -10],[10, 10, 10]]
gridNum = 2
tMin = 0.1
tMax = 100.
tStep = 0.1
t = 100. #integration time
integrationSteps = 10000
multipleStartPoints = []
for x in np.linspace(graphLimit[0][0], graphLimit[1][0], gridNum):
for y in np.linspace(graphLimit[0][1], graphLimit[1][1], gridNum):
for z in np.linspace(graphLimit[0][2], graphLimit[1][2], gridNum):
multipleStartPoints.append([x,y,z])
#E = S[0]
#P = S[1]
#D = S[2]
#full system
#Ed = k(P-E)
#Pd = g1(ED-P)
#Dd = g2(l+1-D-lEP)
maxwell = lambda k, g1, g2, l: lambda S, t:[k*(S[1]-S[0]),g1*(S[0]*S[2]-S[1]), g2*(l+1.-S[2]-l*S[0]*S[1])]
#jacobian
# -k      k      0
# g1D    -g1     g1E
# -g2lP  -g2lE  -g2
maxwellJac = lambda k, g1, g2, l: lambda S, t:[[-k, k, 0], [g1*S[2], -g1, g1*S[0]], [-g2*l*S[1], -g2*l*S[0], -g2]]
#adiabatic elimination system
#Ed = kE((l+1)/(lE^2+1) -1)
maxwellAdiabaticEl = lambda k, l: lambda E, t: k*l*(E-E**3)/(l*E*E+1.)
adiabaticP = lambda l: lambda E: E*(l+1.)/(l*E*E+1.)
adiabaticD = lambda l: lambda E: (l+1.)/(l*E*E+1.)
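# Fixed points of the adiabatic equation k*l*(E - E**3)/(l*E**2 + 1): E = 0 and
# E = +/-1. For l > 0 the off state E = 0 is unstable and E = +/-1 are the stable
# lasing solutions that the trajectories settle onto.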
fig = plt.figure(figsize=(13, 7));
ax = fig.gca(projection='3d')
fig.canvas.set_window_title('Maxwell-Bloch')
ax.set_title('Study of Maxwell-Bloch equations trajectories')
ax.set_xlabel('$E$')
ax.set_ylabel('$P$')
ax.set_zlabel('$D$')
ax.set_xlim(viewLimit[0][0], viewLimit[1][0])
ax.set_ylim(viewLimit[0][1], viewLimit[1][1])
ax.set_zlim(viewLimit[0][2], viewLimit[1][2])
line = []
lineA = []
for i in range(0, len(multipleStartPoints)):
line[len(line):len(line)], = [plt.plot([],[],[])]
lineA[len(lineA):len(lineA)], = [plt.plot([],[],[])]
plt.figtext(0.7, 0.80, '$\dot{E} = \kappa(P-E)$')
plt.figtext(0.7, 0.75, '$\dot{P} = \gamma_1(ED-P)$')
plt.figtext(0.7, 0.70, '$\dot{D} = \gamma_2(\lambda+1-D-\lambda EP)$')
kText = plt.figtext(0.7, 0.65, '')
g1Text = plt.figtext(0.7, 0.60, '')
g2Text = plt.figtext(0.7, 0.55, '')
lText = plt.figtext(0.7, 0.50, '')
tText = plt.figtext(0.7, 0.45, '')
pause = True
reverse = False
adiabatic = False
single = True
def onClick(event):
# print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata))
global pause
global reverse
global adiabatic
global single
global k
global g1
global g2
global l
global t
if event.key == ' ':
pause ^= True
elif event.key == 'r':
reverse ^= True
elif event.key == 'a':
adiabatic ^= True
elif event.key == 'z':
single ^= True
elif event.key == '2':
if k < kMax:
k = min(k + kStep, kMax)
elif event.key == '1':
if k > kMin:
k = max(k - kStep, kMin)
elif event.key == '4':
if g1 < g1Max:
g1 = min(g1 + g1Step, g1Max)
elif event.key == '3':
if g1 > g1Min:
g1 = max(g1 - g1Step, g1Min)
elif event.key == '6':
if g2 < g2Max:
g2 = min(g2 + g2Step, g2Max)
elif event.key == '5':
if g2 > g2Min:
g2 = max(g2 - g2Step, g2Min)
elif event.key == '8':
if l < lMax:
l = min(l + lStep, lMax)
elif event.key == '7':
if l > lMin:
l = max(l - lStep, lMin)
elif event.key == '0':
if t < tMax:
t = min(t + tStep, tMax)
elif event.key == '9':
if t > tMin:
t = max(t - tStep, tMin)
elif event.key == 'q':
exit()
#fig.canvas.mpl_connect('button_press_event', onClick)
fig.canvas.mpl_connect('key_press_event', onClick)
def init():
for i in range(0, len(multipleStartPoints)):
line[i].set_data([], [])
line[i].set_3d_properties([])
lineA[i].set_data([], [])
lineA[i].set_3d_properties([])
kText.set_text('')
g1Text.set_text('')
g2Text.set_text('')
lText.set_text('')
tText.set_text('')
return line, lineA, kText, g1Text, g2Text, lText
def makeGenerator(lMin, lMax, lStep):
def generator():
global l
if not reverse:
l = lMin
else:
l = lMax
while l <= lMax+lStep and l >= lMin-lStep:
if not pause:
if not reverse:
l = l + lStep
else:
l = l - lStep
yield l
return generator
def step(l):
global adiabatic
global single
global k
global g1
global g2
global t
ts = np.linspace(0.0, t, integrationSteps)
i = 0
for sp in multipleStartPoints:
if single:
if i==0:
state = scipy.integrate.odeint(maxwell(k, g1, g2, l), singleStartPoint, ts, Dfun=maxwellJac(k, g1, g2, l))#, mxstep=1000)
Es = state[:,0]
Ps = state[:,1]
Ds = state[:,2]
else:
Es = []
Ps = []
Ds = []
else:
state = scipy.integrate.odeint(maxwell(k, g1, g2, l), sp, ts, Dfun=maxwellJac(k, g1, g2, l))#, mxstep=1000)
Es = state[:,0]
Ps = state[:,1]
Ds = state[:,2]
line[i].set_data(Es,Ps)
line[i].set_3d_properties(Ds)
if adiabatic:
if single:
if i==0:
state = scipy.integrate.odeint(maxwellAdiabaticEl(k, l), singleStartPoint[0], ts)#, mxstep=1000)
Es = state[:,0]
Ps = list(map(adiabaticP(l), Es))
Ds = list(map(adiabaticD(l), Es))
else:
Es = []
Ps = []
Ds = []
else:
state = scipy.integrate.odeint(maxwellAdiabaticEl(k, l), sp[0], ts)#, mxstep=1000)
Es = state[:,0]
Ps = list(map(adiabaticP(l), Es))
Ds = list(map(adiabaticD(l), Es))
else:
Es = []
Ps = []
Ds = []
lineA[i].set_data(Es,Ps)
lineA[i].set_3d_properties(Ds)
i = i + 1
kText.set_text('$\kappa$ = %.2f' % k)
g1Text.set_text('$\gamma_1$ = %.2f' % g1)
g2Text.set_text('$\gamma_2$ = %.2f' % g2)
lText.set_text('$\lambda$ = %.2f' % l)
tText.set_text('$t$ = %.2f' % t)
return line, lineA, kText, g1Text, g2Text, lText, tText
anim = ani.FuncAnimation(fig, step, frames=makeGenerator(lMin, lMax, lStep), init_func=init, blit=False, repeat=True) #, interval=lInt
plt.show()
| gpl-2.0 |
gilson27/LearnRevise | external/meetup/devday_sahaj_software/Archive/loader.py | 1 | 1556 | from PIL import Image
import numpy as np
import pandas as pd
labels = pd.read_csv("trainLabels.csv")
def to_char(v):
if v<10:
v = v+ord("0")
elif v<10+26:
v = v-10+ord("A")
else:
v=v-10-26+ord("a")
return chr(v)
def img2arr(i):
im = Image.open("trainResized/" + str(i) + ".bmp").convert('L') #Can be many different formats.
pix = im.load()
d=[]
for i in range(20):
#d.append([])
for j in range(20):
d.append((pix[i,j]-128)/255.0)
# d[i].append([(pix[i,j]-128)/255.0])
return d
def one_hot(row):
global labels
if (row>len(labels)): return None
val = labels["Class"][row-1]
v=[0]*62
st = encode(val)
v[st] = 1
return v;
def encode(val):
st = 0
val = ord(val)
if val >= ord("a"):
st = 26 + 10 + val - ord("a")
elif val >= ord("A"):
st = 10 + val - ord("A")
elif val >= ord("0"):
st = 0 + val - ord("0")
return st
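# encode() maps the 62 character classes onto contiguous indices:
#   '0'-'9' -> 0-9, 'A'-'Z' -> 10-35, 'a'-'z' -> 36-61
# and to_char() above is its inverse, e.g. encode('b') == 37 and to_char(37) == 'b'.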
class DataSource:
def __init__(self):
self.start =1
def next_batch(self, lim):
#print "batch %s to %s" %(self.start, self.start+lim)
yval = np.array([one_hot(i) for i in range(self.start, self.start+lim)])
xval = np.array([img2arr(i) for i in range(self.start, self.start + lim)])
self.start += lim
return (xval, yval)
def skip(self, lim):
self.start += lim
def reset(self):
self.start = 1;
train = DataSource()
# a,b= train.next_batch(10)
# print len(a[0])
# print len(b[0]) | gpl-3.0 |
aguimaraesduarte/FirefoxCrashGraphs | main.py | 1 | 11171 | from pyspark import SparkContext
from pyspark.sql import SQLContext
from date_utils import *
from extract import *
from transform import *
from load import *
from math_utils import *
import os
# Define global variables and configurations
S3_BUCKET_NAME = "mozilla-metrics"
S3_PATH = "sguha/crashgraphs/JSON/"
LOAD_FROM_S3 = True
SAVE_TO_S3 = True
REMOVE_LOCAL_JSON = False
def main_alg():
"""
This function ties everything together.
The analysis is done for however many days since the last run (obtained by parsing available JSON files in the same directory).
If no such files are found, the anaysis is run since September 1st 2016.
"""
# setup sparkContext
sc = SparkContext(appName="FirefoxCrashGraphs")
sc.addPyFile('date_utils.py')
sc.addPyFile('math_utils.py')
sc.addPyFile('extract.py')
sc.addPyFile('load.py')
sc.addPyFile('transform.py')
# setup sqlContext
sqlContext = SQLContext(sc)
setup_load(sqlContext)
setup_extract(sqlContext)
setup_transform(sqlContext)
# fetch files from S3
if LOAD_FROM_S3:
print "***** FETCHING FILES FROM S3...",
fetch_latest_from_s3(S3_BUCKET_NAME, S3_PATH)
print "DONE!"
# read and clean data; save as SQL table
print "***** READING DATA...",
filteredPingsDF = read_main_summary()
filteredPingsDF_str = "filteredPingsDF"
sqlContext.registerDataFrameAsTable(filteredPingsDF, filteredPingsDF_str)
print "DONE!"
# create aggregate table by client_id and submission_date; save as SQL table
print "***** CREATING AGGREGATE TABLE...",
aggregateDF = aggregate_by_client_date_e10s(filteredPingsDF_str)
aggregateDF_str = "aggregateDF"
sqlContext.registerDataFrameAsTable(aggregateDF, aggregateDF_str)
print "DONE!"
# get start and end dates
last_date = find_last_date()
start_backfill = last_date + timedelta(days=1)
end_backfill = date.today() - timedelta(days=1)
# get date ranges
dates = [] # --list of all end_dates in period
print "***** FINDING ALL WEEKDAYS BETWEEN {start} AND {end}..."\
.format(start=start_backfill, end=end_backfill),
# get all weekdays between the two provided dates
delta = end_backfill - start_backfill
for i in range(delta.days + 1):
day = end_backfill - timedelta(days=i)
if day.weekday() in [0,1,2,3,4]:
end_date = day
start_date = day - timedelta(days=6)
dates.append( (start_date, end_date) )
print "{} DATES".format(len(dates))
# loop through all dates
for i, d in enumerate(reversed(dates)):
print
print "***** DATE {curr} of {tot}".format(curr=i+1, tot=len(dates))
start_date = d[0]
end_date = d[1]
start_date_str = start_date.strftime("%Y%m%d")
end_date_str = end_date.strftime("%Y%m%d")
print "***** Week of interest: {start} :: {end}".format(start=start_date, end=end_date)
# calculate WAU7
wau7 = get_wau7(aggregateDF_str, start_date_str, end_date_str)
print "\tActive profiles: {:,} (based on a 1% sample)".format(wau7*100)
# calculate number of profiles that crashed
CRASH_GRANULARITY = [1, 2] # specify the granularity for the crash rates (default: 1+ and 2+ crashes)
num_profiles_crashed = get_num_crashed(aggregateDF_str, start_date_str, end_date_str, CRASH_GRANULARITY)
for i, crash in enumerate(CRASH_GRANULARITY):
print "\tNumber of profiles that experienced {}+ crashes: {:,} ({:.2%} of active profiles)"\
.format(crash, num_profiles_crashed[i]*100, float(num_profiles_crashed[i]) / wau7)
# calculate new profiles and proportion crashed
num_new_profiles = get_num_new_profiles(aggregateDF_str, start_date_str, end_date_str)
print "\tNew profiles: {:,} (based on a 1% sample) ({:.2%} of active profiles)".format(num_new_profiles*100,
float(num_new_profiles)/wau7)
num_new_profiles_crashed = get_num_new_profiles_crashed(aggregateDF_str, start_date_str, end_date_str, CRASH_GRANULARITY)
for i, crash in enumerate(CRASH_GRANULARITY):
print "\tNumber of new profiles that experienced {}+ crashes: {:,} ({:.2%} of new profiles)"\
.format(crash, num_new_profiles_crashed[i]*100, float(num_new_profiles_crashed[i]) / num_new_profiles)
# get subset of aggregated dataframe containing only the pings for profiles that crashed
aggregate_crashed = aggregate_subset(aggregateDF_str, start_date_str, end_date_str)
# transform into longitudinal format
crashed_longitudinal = make_longitudinal(aggregate_crashed)
# apply mapping function
crash_statistics = crashed_longitudinal.rdd.map(mapCrashes)
# get counts of crashed user types
crash_statistics_counts = crash_statistics.countByKey()
print "\tNumber of profiles that crashed for the first time: {:,} ({:.2%} of crashed profiles)"\
.format(crash_statistics_counts[False]*100,
float(crash_statistics_counts[False])/num_profiles_crashed[0])
print "\tNumber of profiles that crashed and had a previous crash: {:,} ({:.2%} of crashed profiles)"\
.format(crash_statistics_counts[True]*100,
float(crash_statistics_counts[True])/num_profiles_crashed[0])
# get subset of aggregated dataframe containing only the pings for profiles that were created 3 weeks prior
aggregate_new = aggregate_new_users(aggregateDF_str, start_date_str, end_date_str)
new_longitudinal = make_longitudinal(aggregate_new)
new_statistics = new_longitudinal.rdd.map(mapCrashes_new)
# get counts of new user types
new_statistics_counts = new_statistics.countByKey()
new_crashed = new_statistics_counts[1]
new_tot = new_statistics_counts[0]+new_statistics_counts[1]
print "\tNew profiles created between {} and {}: {:,}"\
.format((str2date(start_date_str)-timedelta(days=14)).isoformat(),
(str2date(end_date_str)-timedelta(days=14)).isoformat(),
new_tot*100)
print "\tNumber of new profiles that crashed 1+ times within 2 weeks of profile creation: {:,} ({:.2%} of new profiles)"\
.format(new_crashed*100, float(new_crashed)/new_tot)
# get profiles that crashed in the second week of activity since profile creation
new_statistics = new_longitudinal.rdd.map(mapCrashes_secondWeek)
# get counts of profiles that crashed and those that crashed only during their second week of activity
new_statistics_counts_bis = new_statistics.collect()
new_statistics_counts_crashed = [n for n in new_statistics_counts_bis if n[0] is True]
p = make_new_df(new_statistics_counts_crashed, ["has_crashed", "crashed_second_week", "days_to_first_crash"])
p2 = p[p.crashed_second_week == True]
print "\tNumber of new profiles that crashed 1+ times 8-14 days after profile creation: {:,} ({:.2%} of new profiles that crashed, {:.2%} of new profiles)"\
.format(p2.shape[0]*100, p2.shape[0]*1.0/new_crashed, p2.shape[0]*1.0/new_tot)
# get number of days to first crash statistics
s = p[p.days_to_first_crash >= 0].days_to_first_crash.describe(percentiles = [.1, .25, .5, .75, .9])
print "\t\tMean number of days to first crash: {:.2f}".format(s["mean"])
print "\t\tMedian number of days to first crash: {:.2f}".format(s["50%"])
print "\t\t90th percentile of days to first crash: {:.2f}".format(s["90%"])
# calculate counts for e10s
e10s_counts = get_e10s_counts(aggregateDF_str, start_date_str, end_date_str)
print "\tNumber of profiles that have e10s enabled: {:,} ({:.2%} of crashed profiles)"\
.format(e10s_counts[0]*100, float(e10s_counts[0])/num_profiles_crashed[0])
print "\tNumber of profiles that have e10s disabled: {:,} ({:.2%} of crashed profiles)"\
.format(e10s_counts[1]*100, float(e10s_counts[1])/num_profiles_crashed[0])
# calculate crash rates
crash_rates_avg_by_user = get_crash_rates_by_user(aggregateDF_str, start_date_str, end_date_str)
print "\tMain crashes per hour: {:.2f}".format(crash_rates_avg_by_user[0]*1000)
print "\tContent crashes per hour: {:.2f}".format(crash_rates_avg_by_user[1]*1000)
print "\tPlugin crashes per hour: {:.2f}".format(crash_rates_avg_by_user[2]*1000)
# calculate crash rates by e10s status
crash_rates_avg_by_user_and_e10s = get_crash_rates_by_user_and_e10s(aggregateDF_str, start_date_str, end_date_str)
print "\tMain crashes per hour (e10s enabled): {:.2f}".format(crash_rates_avg_by_user_and_e10s[0]*1000)
print "\tContent crashes per hour (e10s enabled): {:.2f}".format(crash_rates_avg_by_user_and_e10s[1]*1000)
print "\tPlugin crashes per hour (e10s enabled): {:.2f}".format(crash_rates_avg_by_user_and_e10s[2]*1000)
print "\tMain crashes per hour (e10s disabled): {:.2f}".format(crash_rates_avg_by_user_and_e10s[3]*1000)
print "\tContent crashes per hour (e10s disabled): {:.2f}".format(crash_rates_avg_by_user_and_e10s[4]*1000)
print "\tPlugin crashes per hour (e10s disabled): {:.2f}".format(crash_rates_avg_by_user_and_e10s[5]*1000)
# get crash statistics
print "***** SAVING CRASH DATA TO JSON...",
crash_statistics_pd = RDD_to_pandas(crash_statistics, "has_multiple_crashes = True", ["total_ssl_between_crashes"])
write_col_json("fx_crashgraphs_hours", crash_statistics_pd.total_ssl_between_crashes, "hours",
start_date_str, end_date_str, S3_BUCKET_NAME, S3_PATH, SAVE_TO_S3)
print "DONE!"
# get summary statistics
print "***** SAVING RESULTS TO JSON...",
crash_statistics_pd = RDD_to_pandas(crash_statistics) #TODO: combine the two to_pandas operations into one. This operation is expensive.
summary = make_dict_results(end_date, wau7, num_new_profiles, num_profiles_crashed, num_new_profiles_crashed,
crash_statistics_counts, crash_rates_avg_by_user, crash_rates_avg_by_user_and_e10s,
crash_statistics_pd, e10s_counts, new_statistics_counts, p2.shape[0], CRASH_GRANULARITY)
write_dict_json("fx_crashgraphs", summary, start_date_str, end_date_str, S3_BUCKET_NAME, S3_PATH, SAVE_TO_S3)
print "DONE!"
print "***** MERGING SUMMARY JSON FILES...",
# merge summary JSON files into one
os.system('jq -c -s "[.[]|.[]]" fx_crashgraphs-*.json > "fx_crashgraphs.json"')
if SAVE_TO_S3:
store_latest_on_s3(S3_BUCKET_NAME, S3_PATH, "fx_crashgraphs.json")
print "DONE!"
print
# remove local json files
if REMOVE_LOCAL_JSON:
os.system('rm *.json')
print "DONE!"
if __name__ == '__main__':
main_alg()
| mpl-2.0 |
stggh/PyAbel | examples/example_all_O2.py | 2 | 4428 | # -*- coding: utf-8 -*-
# This example compares the available inverse Abel transform methods
# currently - direct, hansenlaw, and basex
# processing the O2- photoelectron velocity-map image
#
# Note it transforms only the Q0 (top-right) quadrant
# using the fundamental transform code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import abel
import collections
import matplotlib.pylab as plt
from time import time
import bz2
# inverse Abel transform methods -----------------------------
# dictionary of method: function()
transforms = {
"basex": abel.basex.basex_transform,
"linbasex": abel.linbasex.linbasex_transform,
"direct": abel.direct.direct_transform,
"hansenlaw": abel.hansenlaw.hansenlaw_transform,
"onion_bordas": abel.onion_bordas.onion_bordas_transform,
"onion_dasch": abel.dasch.onion_peeling_transform,
"three_point": abel.dasch.three_point_transform,
"two_point" : abel.dasch.two_point_transform,
}
# sort dictionary
transforms = collections.OrderedDict(sorted(transforms.items()))
ntrans = np.size(transforms.keys()) # number of transforms
# Image: O2- VMI 1024x1024 pixel ------------------
imagefile = bz2.BZ2File('data/O2-ANU1024.txt.bz2')
IM = np.loadtxt(imagefile)
# recenter the image to mid-pixel (odd image width)
IModd = abel.tools.center.center_image(IM, center="slice", odd_size=True)
h, w = IModd.shape
print("centered image 'data/O2-ANU2048.txt' shape = {:d}x{:d}".format(h, w))
# split image into quadrants
Q = abel.tools.symmetry.get_image_quadrants(IModd, reorient=True)
Q0 = Q[0]
Q0fresh = Q0.copy() # keep clean copy
print ("quadrant shape {}".format(Q0.shape))
# Intensity mask used for intensity normalization
# quadrant image region of bright pixels
mask = np.zeros(Q0.shape, dtype=bool)
mask[500:512, 358:365] = True
# process Q0 quadrant using each method --------------------
iabelQ = [] # keep inverse Abel transformed image
sp = [] # speed distributions
meth = [] # methods
for q, method in enumerate(transforms.keys()):
Q0 = Q0fresh.copy() # top-right quadrant of O2- image
print ("\n------- {:s} inverse ...".format(method))
t0 = time()
# inverse Abel transform using 'method'
IAQ0 = transforms[method](Q0, direction="inverse", dr=0.1,
basis_dir='bases')
print (" {:.1f} sec".format(time()-t0))
# polar projection and speed profile
radial, speed = abel.tools.vmi.angular_integration(IAQ0, origin=(0, 0),
dr=0.1)
# normalize image intensity and speed distribution
IAQ0 /= IAQ0[mask].max()
speed /= speed[radial > 50].max()
# keep data for plots
iabelQ.append(IAQ0)
sp.append((radial, speed))
meth.append(method)
# reassemble image, each quadrant a different method
# plot inverse Abel transformed image slices, and respective speed distributions
ax0 = plt.subplot2grid((1, 2), (0, 0))
ax1 = plt.subplot2grid((1, 2), (0, 1))
def ann_plt (quad, subquad, txt):
# -ve because numpy coords from top
annot_angle = -(30+30*subquad+quad*90)*np.pi/180
annot_coord = (h/2+(h*0.8)*np.cos(annot_angle)/2,
w/2+(w*0.8)*np.sin(annot_angle)/2)
ax0.annotate(txt, annot_coord, color="yellow", horizontalalignment='left')
# for < 4 images pad using a blank quadrant
r, c = Q0.shape
Q = np.zeros((4, r, c))
indx = np.triu_indices(iabelQ[0].shape[0])
iq = 0
for q in range(4):
Q[q] = iabelQ[iq].copy()
ann_plt(q, 0, meth[iq])
ax1.plot(*(sp[iq]), label=meth[iq], alpha=0.5)
iq += 1
if iq < len(transforms):
Q[q][indx] = np.triu(iabelQ[iq])[indx]
ann_plt(q, 1, meth[iq])
ax1.plot(*(sp[iq]), label=meth[iq], alpha=0.5)
iq += 1
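# With eight transform methods and only four quadrants, each quadrant ends up
# showing two methods split along the diagonal: the next method's upper triangle
# (np.triu) is pasted over the first using the indices in indx.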
# reassemble image from transformed (part-)quadrants
im = abel.tools.symmetry.put_image_quadrants((Q[0], Q[1], Q[2], Q[3]),
original_image_shape=IModd.shape)
ax0.axis('off')
ax0.set_title("inverse Abel transforms")
ax0.imshow(im, vmin=0, vmax=0.8)
ax1.set_title("speed distribution")
ax1.axis(ymin=-0.05, ymax=1.1, xmin=50, xmax=450)
ax1.legend(loc=0, labelspacing=0.1, fontsize=10, frameon=False)
plt.tight_layout()
# save a copy of the plot
plt.savefig('plot_example_all_O2.png', dpi=100)
plt.show()
| mit |
devanshdalal/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
janisz/marathon | tests/scale/graph.py | 2 | 10778 | #!/usr/bin/env python
import click
import csv
import json
import math
import matplotlib.pyplot as plt
import numpy as np
import sys
from dcos.errors import DCOSException
from common import get_key, empty_stats
"""
Graph functions for scale graphs.
Prints 1up and 2up graphs of scale timings and errors.
If you are running this on a Mac, you will likely need to create a ~/.matplotlib/matplotlibrc
file and include `backend: TkAgg`.
http://stackoverflow.com/questions/21784641/installation-issue-with-matplotlib-python
"""
def index_of_first_failure(stats, marathon_type, test_type):
""" Finds the first occurance of an error during a deployment
"""
index = -1
deploy_status = stats.get(get_key(marathon_type, test_type, 'deployment_status'))
for status in deploy_status:
index += 1
if "f" == status:
return index
return -1
def pad(array, size):
current_size = len(array)
if current_size < size:
pad = np.zeros(size - current_size)
padded = array.tolist() + pad.tolist()
return np.array(padded)
else:
return array
def plot_test_timing(plot, stats, marathon_type, test_type, xticks):
""" Plots a specific test graph.
In addition, it sets the legend title, and flags the highest scale reached.
:param plot: The matplotlib subplot object is the object that will be plotted
:type plot: matplotlib subplot
:param stats: This map contains the data to be plotted
:type stats: map
:param marathon_type: The type of marathon is part of the map key. For scale tests it is `root` (vs. mom1)
:type marathon_type: str
:param test_type: Defines the test type, usually {instances, count, group}
:type test_type: str
:param xticks: An array of scale targets (1, 10, 100) for the x axis of the plot
:type xticks: array
"""
deploy_time = stats.get(get_key(marathon_type, test_type, 'deploy_time'))
if deploy_time is None or len(deploy_time) == 0 or deploy_time[0] <= 0.0:
return
timings = np.array(deploy_time)
title = '{} Scale Times'.format(test_type.title())
timings = pad(timings, len(xticks))
timings_handle, = plot.plot(xticks, timings, label=title)
fail_index = index_of_first_failure(stats, marathon_type, test_type)
if fail_index > 0:
scale_at_fail = stats.get(get_key(marathon_type, test_type, 'max'))[fail_index]
time_at_fail = stats.get(get_key(marathon_type, test_type, 'human_deploy_time'))[fail_index]
text = '{} at {}'.format(scale_at_fail, time_at_fail)
plot.text(fail_index, timings[fail_index], text, wrap=True)
class GraphException(DCOSException):
""" Raised when there is a issue with the ability to graph
"""
def __init__(self, message):
self.message = message
def plot_test_errors(plot, stats, marathon_type, test_type, xticks):
""" Plots the number of errors for a given test
:param plot: The matplotlib subplot object is the object that will be plotted
:type plot: matplotlib subplot
:param stats: This map contains the data to be plotted
:type stats: map
:param marathon_type: The type of marathon is part of the map key. For scale tests it is `root` (vs. mom1)
:type marathon_type: str
:param test_type: Defines the test type, usually {instances, count, group}
:type test_type: str
:param xticks: An array of scale targets (1, 10, 100) for the x axis of the plot
:type xticks: array
"""
test_errors = stats.get(get_key(marathon_type, test_type, 'errors'))
if test_errors is None or len(test_errors) == 0:
return 0
plot.set_title("Errors During Test")
errors = np.array(test_errors)
title = '{} Errors'.format(test_type.title())
errors = pad(errors, len(xticks))
errors_handle, = plot.plot(xticks, errors, label=title, marker='o', linestyle='None')
return max(test_errors)
def create_scale_graph(stats, metadata, file_name='scale.png'):
""" Creates a 1up or 2up scale graph depending on if error information is provided.
The first 1up graph "time_plot", is x = scale and y = time to reach scale
The second graph "error_plot", is an error graph that plots the number of errors that occurred during the test.
:param stats: This map contains the data to be plotted
:type stats: map
:param metadata: The JSON object that contains the metadata for the cluster under test
:type metadata: JSON
:param file_name: The file name of the graph to create
:type file_name: str
"""
# strong prefer to have this discoverable, perhaps in the metadata
test_types = ['instances', 'count', 'group']
marathon_type = metadata['marathon']
error_plot = None
fig = None
time_plot = None
# figure and plots setup
if error_graph_enabled(stats, marathon_type, test_types):
fig, (time_plot, error_plot) = plt.subplots(nrows=2)
else:
fig, time_plot = plt.subplots(nrows=1)
# figure size, borders and padding
fig.subplots_adjust(left=0.12, bottom=0.08, right=0.90, top=0.90, wspace=0.25, hspace=0.40)
fig.set_size_inches(9.5, 9.5)
# Titles and X&Y setup
time_plot.title.set_text('Marathon Scale Test for v{}'.format(metadata['marathon-version']))
targets = get_scale_targets(stats, marathon_type, test_types)
if targets is None:
raise GraphException('Unable to create graph due without targets')
xticks = np.array(range(len(targets)))
plt.xticks(xticks, targets)
time_plot.set_xticks(xticks, targets)
agents, cpus, mem = get_resources(metadata)
time_plot.set_xlabel('Scale Targets on {} nodes with {} cpus and {} mem'.format(agents, cpus, mem))
time_plot.set_ylabel('Time to Reach Scale (sec)')
time_plot.grid(True)
# graph of all the things
for test_type in test_types:
plot_test_timing(time_plot, stats, marathon_type, test_type, xticks)
time_plot.legend(loc='upper center', bbox_to_anchor=(0.47, -0.15), fancybox=False, shadow=False, ncol=5)
# graph the errors if they exist
if error_plot is not None:
top = 1
for test_type in test_types:
largest = plot_test_errors(error_plot, stats, marathon_type, test_type, xticks)
if largest > top:
top = largest
error_plot.legend(loc='upper center', bbox_to_anchor=(0.47, -0.10), fancybox=False, shadow=False, ncol=5)
error_plot.set_ylim(bottom=0, top=roundup_to_nearest_10(top))
plt.savefig(file_name)
def roundup_to_nearest_10(x):
return int(math.ceil(x / 10.0)) * 10
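# e.g. roundup_to_nearest_10(37) == 40 and roundup_to_nearest_10(40) == 40;
# used below to give the error plot some headroom on its y axis.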
def get_scale_targets(stats, marathon_type, test_types):
""" Returns the scale targets 1, 10, 100, 1000
It is possible that some tests are ignored so we may have to
loop to grab the right list.
:param stats: This map contains the data to be plotted
:type stats: map
:param marathon_type: The type of marathon is part of the map key. For scale tests it is `root` (vs. mom1)
:type marathon_type: str
:param test_types: An array of test types to be graphed, usually {instances, count, group}
:type test_types: array
"""
targets = None
for test_type in test_types:
targets = stats.get(get_key(marathon_type, test_type, 'target'))
if targets and len(targets) > 0:
return targets
return targets
def error_graph_enabled(stats, marathon_type, test_types):
""" Returns true if there is any error data to graph
:param stats: This map contains the data to be plotted
:type stats: map
:param marathon_type: The type of marathon is part of the map key. For scale tests it is `root` (vs. mom1)
:type marathon_type: str
:param test_types: An array of test types to be graphed, usually {instances, count, group}
:type test_types: array
"""
enabled = False
for test_type in test_types:
test_errors_key = get_key(marathon_type, test_type, 'errors')
if test_errors_key is not None:
test_errors = stats.get(test_errors_key)
# if there are test errors... graph them
if test_errors is not None and len(test_errors) > 0:
return True
return False
def get_resources(metadata):
agents = 0
cpus = 0
mem = 0
try:
agents = metadata['private-agents']
cpus = metadata['resources']['cpus']
mem = metadata['resources']['memory']
except Exception as e:
print(e)
return (agents, cpus, mem)
def load(csvfile):
""" This is suppose to be short-term. Teammates have better ideas on how to structure the data.
I would like to not break the ability to call it directly from the test (instead of shelling out).
index after table header:
0 - target - expected scale
1 - max - actual scale
2 - deploy_time
3 - human_deploy_time
4 - launch_status
5 - deployment_status
6 - errors
"""
row_keys = ['target', 'max', 'deploy_time', 'human_deploy_time', 'launch_status', 'deployment_status', 'errors']
stats = empty_stats()
current_marathon = None
current_test_type = None
index_from_header = 0
with open(csvfile, 'r') as f:
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for i, row in enumerate(reader):
if 'Marathon:' in row:
current_marathon = row[1]
current_test_type = row[2]
index_from_header = 0
elif len(row) > 0:
key = get_key(current_marathon, current_test_type, row_keys[index_from_header])
stats[key] = row
index_from_header += 1
return stats
def load_metadata(file):
with open(file) as json_data:
return json.load(json_data)
@click.command()
@click.option('--csvfile', default='scale-test.csv', help='Name of csv file to graph')
@click.option('--metadatafile', default='meta-data.json', help='Name of meta-data file to use for graphing')
@click.option('--graphfile', default='scale.png', help='Name of graph to create')
def main(csvfile, metadatafile, graphfile):
"""
CLI entry point for graphing scale data.
Typically, scale tests create a scale-test.csv file which contains the graph points.
It also produces a meta-data.json which is necessary for the graphing process.
"""
stats = load(csvfile)
metadata = load_metadata(metadatafile)
create_scale_graph(stats, metadata, graphfile)
if __name__ == '__main__':
main()
| apache-2.0 |
arjoly/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
vybstat/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
showa-yojyo/notebook | source/_sample/scipy/least_squares.py | 2 | 1033 | #!/usr/bin/env python
"""least_squares.py: Demonstrate least-squares fitting method of SciPy.
References:
* http://sagemath.wikispaces.com/numpy.linalg.lstsq
"""
from scipy.linalg import lstsq
import numpy as np
import matplotlib.pyplot as plt
# pylint: disable=invalid-name
# Sampling data set.
xd = np.array([72, 67, 65, 55, 25, 36, 56, 34,
18, 71, 67, 48, 72, 51, 53])
yd = np.array([202, 186, 187, 180, 156, 169, 174,
172, 153, 199, 193, 174, 198, 183, 178])
# Solve the linear least squares problem.
A = np.c_[xd[:, np.newaxis], np.ones(xd.shape[0])]
B = yd
X, residues, rank, s = lstsq(A, B)
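# lstsq solves the overdetermined system A @ [a, b] ~= B in the least-squares
# sense, i.e. it minimizes ||A x - B||^2 with rows of A equal to [x_i, 1], so the
# solution X holds the slope a and intercept b of the regression line.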
# Show the regression curve (line).
a = X[0]
b = X[1]
print(f"Line: y = {a:.3f}x {b:+.3f}")
# Plot both the sampling data and the regression curve.
# pylint: disable=invalid-slice-index
plt.figure()
xs = np.r_[min(xd):max(xd):15j]
ys = a * xs + b
plt.plot(xs, ys, color='deeppink', label='regression curve')
plt.scatter(xd, yd, color='pink', marker='s', label='data set')
plt.legend()
plt.show()
| mit |
ArianeFire/HaniCam | Hanicam/FACE_KNOWN/program/recog.py | 3 | 2937 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import sys, os
sys.path.append("../..")
# import facerec modules
from facerec.feature import Fisherfaces, SpatialHistogram, Identity
from facerec.distance import EuclideanDistance, ChiSquareDistance
from facerec.classifier import NearestNeighbor
from facerec.model import PredictableModel
from facerec.validation import KFoldCrossValidation
from facerec.visual import subplot
from facerec.util import minmax_normalize
from facerec.serialization import save_model, load_model
# import numpy, matplotlib and logging
import numpy as np
# try to import the PIL Image module
try:
from PIL import Image
except ImportError:
import Image
import matplotlib.cm as cm
import logging
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from facerec.lbp import LPQ, ExtendedLBP
def read_images(path, sz=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
sz: A tuple with the size Resizes
Returns:
A list [X,y]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
"""
c = 0
X,y = [], []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
try:
im = Image.open(os.path.join(subject_path, filename))
im = im.convert("L")
# resize to given size (if given)
if (sz is not None):
im = im.resize(sz, Image.ANTIALIAS)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError, (errno, strerror):
print "I/O error({0}): {1}".format(errno, strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
c = c+1
return [X,y]
# Read in the image data:
[X,y] = read_images("data/gi4")
#### Learning a Model
model.compute(X,y)
#### Getting a prediction
# This gets you the output:
prediction = model.predict(X)
predicted_label = prediction[0]
classifier_output = prediction[1]
# Now let's get the distance from the assuming a 1-Nearest Neighbor.
# Since it's a 1-Nearest Neighbor only look take the zero-th element:
distance = classifier_output['distances'][0]
# Now you can easily threshold by it:
if distance > 10.0:
print "Unknown Person!"
else
print "Person is known with label %i" % (predicted_label)
```
| mit |
tayebzaidi/HonorsThesisTZ | ThesisCode/classification/selectedToPDF_sne.py | 1 | 4213 | #!/usr/bin/env python
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
import json
import os
import sys
import numpy as np
import math
def main():
destination_file = 'OSC_misclassified.pdf'
source_directory = '../data/OSC/parsed'
filename = 'misclassed_lcurves_list'
with open(filename, 'r') as f:
lightcurves = [line.rstrip('\n') for line in f]
with PdfPages(destination_file) as pdf:
for lightcurve in lightcurves:
lightcurve_path = os.path.join(source_directory,lightcurve)
with open(lightcurve_path, 'r') as f:
file_data = json.load(f)
#Ignore all non-CSP or CfA entries
# for k in list(file_data.keys()):
# if not (k.endswith('CSP') or ('CfA' in k)):
# del file_data[k]
# if len(file_data) == 0:
# continue
#This hack removes the '_gpsmoothed.json' from the string to return the objname
objname = lightcurve[:-16]
#Number of filters
N = len(file_data.keys())
print(N)
cols = 3
if N < 3:
cols = 1
rows = int(math.ceil(N / cols))
#To ensure that plot text fits without overlay
#Change font size to fit the text, taken from \
#http://stackoverflow.com/questions/3899980/how-to-change-the-font-size-on-a-matplotlib-plot\
# answer by Pedro M. Duarte
SIZE = 5
MEDIUM_SIZE = 8
BIGGER_SIZE = 10
plt.rc('font', size=SIZE) # controls default text sizes
plt.rc('axes', titlesize=SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
if rows > 3:
small_size = 2
plt.rc('font', size=small_size) # controls default text sizes
plt.rc('axes', titlesize=small_size) # fontsize of the axes title
plt.rc('axes', labelsize=small_size) # fontsize of the x and y labels
plt.rc('xtick', labelsize=small_size) # fontsize of the tick labels
plt.rc('ytick', labelsize=small_size)
gs = gridspec.GridSpec(rows, cols)
fig = plt.figure(figsize=(6, 6))
fig.suptitle(objname)
#Return the list of keys from the file_data
data = list(file_data)
for i in range(len(data)):
filt = data[i]
mjd = file_data[filt]['mjd']
mag = file_data[filt]['mag']
mag_err = file_data[filt]['dmag']
model_phase = file_data[filt]['modeldate']
model_mag = file_data[filt]['modelmag']
#bspline_mag = file_data[filt]['bsplinemag']
goodstatus = file_data[filt]['goodstatus']
type = file_data[filt]['type']
ax = fig.add_subplot(gs[i])
ax.errorbar(mjd, mag, fmt='r', yerr=mag_err,label='Original', alpha=0.7, linestyle=None)
ymin, ymax = ax.get_ylim()
ax.plot(model_phase, model_mag, '-k', label='GP')
#ax.plot(model_phase, bspline_mag, '-b', label='BSpline')
ax.set_title(filt)
handles, labels = ax.get_legend_handles_labels()
if(not goodstatus):
ax.set_ylim(ymin, ymax)
#Working in flux space now
#ax.invert_yaxis()
fig.legend(handles, labels, title=type)
pdf.savefig() # saves the current figure into a pdf page
plt.close(fig)
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
groutr/numpy | doc/source/conf.py | 63 | 9811 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.0.1":
raise RuntimeError("Sphinx 1.0.1 or newer required")
needs_sphinx = '1.0'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.autosummary',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# General substitutions.
project = 'NumPy'
copyright = '2008-2009, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print("%s %s" % (version, release))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if not os.path.isdir(themedir):
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init && git submodule update")
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("http://scipy.org/", "Scipy.org"),
("http://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_sidebars = {'index': 'indexsidebar.html'}
html_additional_pages = {
'index': 'indexcontent.html',
}
html_title = "%s v%s Manual" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
htmlhelp_basename = 'numpy'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Texinfo output
# -----------------------------------------------------------------------------
texinfo_documents = [
("contents", 'numpy', 'Numpy Documentation', _stdauthor, 'Numpy',
"NumPy: array processing for numbers, strings, records, and objects.",
'Programming',
1),
]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {'http://docs.python.org/dev': None}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
plot_rcparams = {
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
        except Exception:
return None
try:
fn = inspect.getsourcefile(obj)
    except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
    except Exception:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(numpy.__file__))
if 'dev' in numpy.__version__:
return "http://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
fn, linespec)
else:
return "http://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
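# For illustration only (not part of the original configuration): for a
# released build, linkcode_resolve points an object defined in, say,
# numpy/lib/function_base.py at a URL of the form
#   http://github.com/numpy/numpy/blob/v<release>/numpy/lib/function_base.py#L<start>-L<end>
# while development ('dev') builds link against the master branch instead.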
| bsd-3-clause |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/_vendor/tqdm/_tqdm.py | 6 | 46609 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import
# integer division / : float, // : int
from __future__ import division
# compatibility functions and utilities
from ._utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict
from ._monitor import TMonitor
# native libraries
import sys
from numbers import Number
from time import time
from contextlib import contextmanager
# For parallelism safety
import multiprocessing as mp
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " + str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
# Create global parallelism locks to avoid racing issues with parallel bars
# works only if fork available (Linux, MacOSX, but not on Windows)
try:
mp_lock = mp.RLock() # multiprocessing lock
except ImportError: # pragma: no cover
mp_lock = None
except OSError: # pragma: no cover
mp_lock = None
try:
th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
th_lock = None
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
global mp_lock, th_lock
self.locks = [lk for lk in [mp_lock, th_lock] if lk is not None]
def acquire(self):
for lock in self.locks:
lock.acquire()
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
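# A minimal sketch (an illustration, not part of the vendored module) of how the
# docstring above suggests sharing the write lock with worker processes on
# Windows, where `fork` is unavailable; `worker` and `jobs` are placeholders:
#
#     from multiprocessing import Pool, RLock
#     tqdm.set_lock(RLock())
#     pool = Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))
#     pool.map(worker, jobs)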
class tqdm(object):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
_lock = TqdmDefaultWriteLock()
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
        divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.95:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
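    # Illustrative outputs of format_sizeof (hedged examples derived from the
    # logic above, not part of the vendored source):
    #     format_sizeof(1234)                ->  '1.23k'
    #     format_sizeof(123456)              ->  '123k'
    #     format_sizeof(1310720, 'B', 1024)  ->  '1.25MB'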
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
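    # Illustrative outputs of format_interval (hedged examples, not part of the
    # vendored source):
    #     format_interval(75)    ->  '01:15'
    #     format_interval(3661)  ->  '1:01:01'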
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int
Number of finished iterations.
total : int
            The expected total number of iterations. If meaningless (e.g. None),
            only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progress meter to stay within this bound
[default: None]. The fallback meter width is 10 for the progress
bar + no limit for the iterations counter and statistics. If 0,
will not print any meter (only stats).
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
(1-9 #).
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, remaining, desc, postfix.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
and will if possible be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
"""
# sanity check: total
if total and n > total:
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
total *= unit_scale
n *= unit_scale
unit_scale = False
format_interval = tqdm.format_interval
elapsed_str = format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total else None
else:
n_fmt = str(n)
total_fmt = str(total)
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
remaining_str = format_interval((total - n) / rate) \
if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
# old prefix setup work around
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
l_bar += '{0:3.0f}%|'.format(percentage)
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
if bar_format:
# Custom bar formatting
# Populate a dict with all available progress indicators
bar_args = {'n': n,
'n_fmt': n_fmt,
'total': total,
'total_fmt': total_fmt,
'percentage': percentage,
'rate': inv_rate if inv_rate and inv_rate > 1
else rate,
'rate_fmt': rate_fmt,
'rate_noinv': rate,
'rate_noinv_fmt': rate_noinv_fmt,
'rate_inv': inv_rate,
'rate_inv_fmt': rate_inv_fmt,
'elapsed': elapsed_str,
'remaining': remaining_str,
'l_bar': l_bar,
'r_bar': r_bar,
'desc': prefix or '',
'postfix': postfix,
# 'bar': full_bar # replaced by procedure below
}
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
# Interpolate supplied bar format with the dict
if '{bar}' in bar_format:
# Format left/right sides of the bar, and format the bar
# later in the remaining space (avoid breaking display)
l_bar_user, r_bar_user = bar_format.split('{bar}')
l_bar = l_bar_user.format(**bar_args)
r_bar = r_bar_user.format(**bar_args)
else:
# Else no progress bar, we can just format and return
return bar_format.format(**bar_args)
# Formatting progress bar
# space available for bar's display
N_BARS = max(1, ncols - len(l_bar) - len(r_bar)) if ncols \
else 10
# format bar depending on availability of unicode/ascii chars
if ascii:
bar_length, frac_bar_length = divmod(
int(frac * N_BARS * 10), 10)
bar = '#' * bar_length
frac_bar = chr(48 + frac_bar_length) if frac_bar_length \
else ' '
else:
bar_length, frac_bar_length = divmod(int(frac * N_BARS * 8), 8)
bar = _unich(0x2588) * bar_length
frac_bar = _unich(0x2590 - frac_bar_length) \
if frac_bar_length else ' '
# whitespace padding
if bar_length < N_BARS:
full_bar = bar + frac_bar + \
' ' * max(N_BARS - bar_length - 1, 0)
else:
full_bar = bar + \
' ' * max(N_BARS - bar_length, 0)
# Piece together the bar parts
return l_bar + full_bar + r_bar
# no total: no progressbar, ETA, just progress stats
else:
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
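    # For illustration (a hedged example, not part of the vendored source):
    # with default options, format_meter(20, 100, 12) renders roughly
    #      20%|██        | 20/100 [00:12<00:48,  1.67it/s]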
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Add to the list of instances
if "_instances" not in cls.__dict__:
cls._instances = WeakSet()
if "_lock" not in cls.__dict__:
cls._lock = TqdmDefaultWriteLock()
with cls._lock:
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance"""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance)
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
if not instance.gui: # pragma: no cover
raise
else:
for inst in cls._instances:
# negative `pos` means fixed
if inst.pos > abs(instance.pos):
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""
Print a message via tqdm (without overlap with bars)
"""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
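    # Illustrative use (not part of the vendored source): printing from inside a
    # loop without clobbering the bar display; `items` is a placeholder iterable:
    #
    #     for item in tqdm(items):
    #         tqdm.write("processed %r" % (item,))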
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
Disable tqdm within context and refresh tqdm when exits.
Useful when writing to standard output stream
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls._lock.acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp)):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
# Avoid race conditions by checking that the instance started
if hasattr(inst, 'start_t'): # pragma: nocover
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
cls._lock = lock
@classmethod
def get_lock(cls):
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.DataFrameGroupBy
| groupby.SeriesGroupBy
).progress_apply
    A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm, tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.core.groupby import DataFrameGroupBy
from pandas.core.groupby import SeriesGroupBy
from pandas.core.groupby import GroupBy
from pandas.core.groupby import PanelGroupBy
from pandas import Panel
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = getattr(df, 'ngroups', None)
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
else: # DataFrame or Panel
axis = kwargs.get('axis', 0)
                        # total number of calls is size // shape[axis]
                        # (e.g. one call per column when axis=0)
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if t.total and t.n < t.total else 0)
return func(*args, **kwargs)
                # Apply the provided function to the df via our wrapper
                # (which updates the bar); extra **kwargs go to pandas
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
Panel.progress_apply = inner_generator()
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. As a last resort, only basic
progress statistics are displayed (no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive integer,
e.g. int(9e9).
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval, in seconds [default: 0.1].
maxinterval : float, optional
Maximum progress display update interval, in seconds [default: 10].
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters `1-9 #`.
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, elapsed, remaining, desc, postfix.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int, optional
The initial counter value. Useful when restarting a progress
bar [default: 0].
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm_gui(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
"""
if file is None:
file = sys.stderr
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if disable:
self.iterable = iterable
self.disable = disable
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
return
if kwargs:
self.disable = True
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (TqdmDeprecationWarning("""\
`nested` is deprecated and automated. Use position instead for manual control.
""", fp_write=getattr(file, 'write', sys.stderr.write)) if "nested" in kwargs
else TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
# elif ncols is not None:
# ncols = 79
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
# else:
# ncols = 79
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ascii:
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
self.sp(self.__repr__(elapsed=0))
if self.pos:
self.moveto(-abs(self.pos))
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else self.total)
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
return False
def __del__(self):
self.close()
def __repr__(self, elapsed=None):
return self.format_meter(
self.n, self.total,
elapsed if elapsed is not None else self._time() - self.start_t,
self.dynamic_ncols(self.fp) if self.dynamic_ncols else self.ncols,
self.desc, self.ascii, self.unit,
self.unit_scale, 1 / self.avg_time if self.avg_time else None,
self.bar_format, self.postfix, self.unit_divisor)
def __lt__(self, other):
return abs(self.pos) < abs(other.pos)
def __le__(self, other):
return (self < other) or (self == other)
def __eq__(self, other):
return abs(self.pos) == abs(other.pos)
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return not (self <= other)
def __ge__(self, other):
return not (self < other)
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
else:
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
_time = self._time
try:
sp = self.sp
except AttributeError:
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = _time() - last_print_t
if delta_t >= mininterval:
cur_t = _time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
self.n = n
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
# Print bar update
sp(self.__repr__())
if self.pos:
self.moveto(-abs(self.pos))
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * \
(mininterval / delta_t
if mininterval and delta_t else 1) + \
(1 - smoothing) * miniters
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int, optional
Increment to add to the internal counter of iterations
[default: 1].
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
raise ValueError("n ({0}) cannot be negative".format(n))
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning("""\
Please use `tqdm_gui(...)` instead of `tqdm(..., gui=True)`
""", fp_write=getattr(self.fp, 'write', sys.stderr.write))
with self._lock:
if self.pos:
self.moveto(abs(self.pos))
# Print bar update
self.sp(self.__repr__())
if self.pos:
self.moveto(-abs(self.pos))
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""
Cleanup and (if leave=False) close the progressbar.
"""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
with self._lock:
if pos:
self.moveto(pos)
if self.leave:
if self.last_print_n < self.n:
# stats for overall rate (no weighted average)
self.avg_time = None
self.sp(self.__repr__())
if pos:
self.moveto(-pos)
else:
fp_write('\n')
else:
self.sp('') # clear up last bar
if pos:
self.moveto(-pos)
else:
fp_write('\r')
def unpause(self):
"""
Restart tqdm timer from last print time.
"""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""
Set/modify description without ': ' appended.
"""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = '{0:2.3g}'.format(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
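    # Illustrative use (not part of the vendored source): attach extra stats to
    # the end of the bar; `loss` and `acc` are placeholder metric names:
    #
    #     with tqdm(total=100) as pbar:
    #         pbar.set_postfix(loss=0.25, acc=0.9)
    #         pbar.update(10)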
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
def clear(self, nolock=False):
"""
Clear current bar display
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False):
"""
Force refresh the display of this bar
"""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp(self.__repr__())
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
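# Minimal manual-update sketch (illustrative only, not part of the vendored
# module), combining the context-manager support and update() defined above;
# `total_bytes` and `read_chunks` are placeholders:
#
#     with tqdm(total=total_bytes, unit='B', unit_scale=True, unit_divisor=1024) as pbar:
#         for chunk in read_chunks():
#             pbar.update(len(chunk))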
| apache-2.0 |
markelg/xray | xray/test/test_dataarray.py | 1 | 59254 | import numpy as np
import pandas as pd
from copy import deepcopy
from textwrap import dedent
from xray import (align, concat, broadcast_arrays, Dataset, DataArray,
Coordinate, Variable)
from xray.core.pycompat import iteritems, OrderedDict
from . import (TestCase, ReturnItem, source_ndarray, unittest, requires_dask,
InaccessibleArray)
class TestDataArray(TestCase):
def setUp(self):
self.attrs = {'attr1': 'value1', 'attr2': 2929}
self.x = np.random.random((10, 20))
self.v = Variable(['x', 'y'], self.x)
self.va = Variable(['x', 'y'], self.x, self.attrs)
self.ds = Dataset({'foo': self.v})
self.dv = self.ds['foo']
def test_repr(self):
v = Variable(['time', 'x'], [[1, 2, 3], [4, 5, 6]], {'foo': 'bar'})
data_array = DataArray(v, {'other': np.int64(0)}, name='my_variable')
expected = dedent("""\
<xray.DataArray 'my_variable' (time: 2, x: 3)>
array([[1, 2, 3],
[4, 5, 6]])
Coordinates:
other int64 0
* time (time) int64 0 1
* x (x) int64 0 1 2
Attributes:
foo: bar""")
self.assertEqual(expected, repr(data_array))
def test_properties(self):
self.assertVariableEqual(self.dv.variable, self.v)
self.assertArrayEqual(self.dv.values, self.v.values)
for attr in ['dims', 'dtype', 'shape', 'size', 'nbytes', 'ndim', 'attrs']:
self.assertEqual(getattr(self.dv, attr), getattr(self.v, attr))
self.assertEqual(len(self.dv), len(self.v))
self.assertVariableEqual(self.dv, self.v)
self.assertItemsEqual(list(self.dv.coords), list(self.ds.coords))
for k, v in iteritems(self.dv.coords):
self.assertArrayEqual(v, self.ds.coords[k])
with self.assertRaises(AttributeError):
self.dv.dataset
self.assertIsInstance(self.ds['x'].to_index(), pd.Index)
with self.assertRaisesRegexp(ValueError, 'must be 1-dimensional'):
self.ds['foo'].to_index()
with self.assertRaises(AttributeError):
self.dv.variable = self.v
def test_name(self):
arr = self.dv
self.assertEqual(arr.name, 'foo')
copied = arr.copy()
arr.name = 'bar'
self.assertEqual(arr.name, 'bar')
self.assertDataArrayEqual(copied, arr)
actual = DataArray(Coordinate('x', [3]))
actual.name = 'y'
expected = DataArray(Coordinate('y', [3]))
self.assertDataArrayIdentical(actual, expected)
def test_dims(self):
arr = self.dv
self.assertEqual(arr.dims, ('x', 'y'))
with self.assertRaisesRegexp(AttributeError, 'you cannot assign'):
arr.dims = ('w', 'z')
def test_encoding(self):
expected = {'foo': 'bar'}
self.dv.encoding['foo'] = 'bar'
self.assertEquals(expected, self.dv.encoding)
expected = {'baz': 0}
self.dv.encoding = expected
self.assertEquals(expected, self.dv.encoding)
self.assertIsNot(expected, self.dv.encoding)
def test_constructor(self):
data = np.random.random((2, 3))
actual = DataArray(data)
expected = Dataset({None: (['dim_0', 'dim_1'], data)})[None]
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(data, [['a', 'b'], [-1, -2, -3]])
expected = Dataset({None: (['dim_0', 'dim_1'], data),
'dim_0': ('dim_0', ['a', 'b']),
'dim_1': ('dim_1', [-1, -2, -3])})[None]
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(data, [pd.Index(['a', 'b'], name='x'),
pd.Index([-1, -2, -3], name='y')])
expected = Dataset({None: (['x', 'y'], data),
'x': ('x', ['a', 'b']),
'y': ('y', [-1, -2, -3])})[None]
self.assertDataArrayIdentical(expected, actual)
coords = [['a', 'b'], [-1, -2, -3]]
actual = DataArray(data, coords, ['x', 'y'])
self.assertDataArrayIdentical(expected, actual)
coords = [pd.Index(['a', 'b'], name='A'),
pd.Index([-1, -2, -3], name='B')]
actual = DataArray(data, coords, ['x', 'y'])
self.assertDataArrayIdentical(expected, actual)
coords = {'x': ['a', 'b'], 'y': [-1, -2, -3]}
actual = DataArray(data, coords, ['x', 'y'])
self.assertDataArrayIdentical(expected, actual)
coords = [('x', ['a', 'b']), ('y', [-1, -2, -3])]
actual = DataArray(data, coords)
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(data, OrderedDict(coords))
self.assertDataArrayIdentical(expected, actual)
expected = Dataset({None: (['x', 'y'], data),
'x': ('x', ['a', 'b'])})[None]
actual = DataArray(data, {'x': ['a', 'b']}, ['x', 'y'])
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(data, dims=['x', 'y'])
expected = Dataset({None: (['x', 'y'], data)})[None]
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(data, dims=['x', 'y'], name='foo')
expected = Dataset({'foo': (['x', 'y'], data)})['foo']
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(data, name='foo')
expected = Dataset({'foo': (['dim_0', 'dim_1'], data)})['foo']
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(data, dims=['x', 'y'], attrs={'bar': 2})
expected = Dataset({None: (['x', 'y'], data, {'bar': 2})})[None]
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(data, dims=['x', 'y'], encoding={'bar': 2})
expected = Dataset({None: (['x', 'y'], data, {}, {'bar': 2})})[None]
self.assertDataArrayIdentical(expected, actual)
def test_constructor_invalid(self):
data = np.random.randn(3, 2)
with self.assertRaisesRegexp(ValueError, 'coords is not dict-like'):
DataArray(data, [[0, 1, 2]], ['x', 'y'])
with self.assertRaisesRegexp(ValueError, 'not a subset of the .* dim'):
DataArray(data, {'x': [0, 1, 2]}, ['a', 'b'])
with self.assertRaisesRegexp(ValueError, 'not a subset of the .* dim'):
DataArray(data, {'x': [0, 1, 2]})
with self.assertRaisesRegexp(TypeError, 'is not a string'):
DataArray(data, dims=['x', None])
def test_constructor_from_self_described(self):
data = [[-0.1, 21], [0, 2]]
expected = DataArray(data,
coords={'x': ['a', 'b'], 'y': [-1, -2]},
dims=['x', 'y'], name='foobar',
attrs={'bar': 2}, encoding={'foo': 3})
actual = DataArray(expected)
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(expected.values, actual.coords)
self.assertDataArrayEqual(expected, actual)
frame = pd.DataFrame(data, index=pd.Index(['a', 'b'], name='x'),
columns=pd.Index([-1, -2], name='y'))
actual = DataArray(frame)
self.assertDataArrayEqual(expected, actual)
series = pd.Series(data[0], index=pd.Index([-1, -2], name='y'))
actual = DataArray(series)
self.assertDataArrayEqual(expected[0].reset_coords('x', drop=True),
actual)
panel = pd.Panel({0: frame})
actual = DataArray(panel)
expected = DataArray([data], expected.coords, ['dim_0', 'x', 'y'])
self.assertDataArrayIdentical(expected, actual)
expected = DataArray(data,
coords={'x': ['a', 'b'], 'y': [-1, -2],
'a': 0, 'z': ('x', [-0.5, 0.5])},
dims=['x', 'y'])
actual = DataArray(expected)
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(expected.values, expected.coords)
self.assertDataArrayIdentical(expected, actual)
expected = Dataset({'foo': ('foo', ['a', 'b'])})['foo']
actual = DataArray(pd.Index(['a', 'b'], name='foo'))
self.assertDataArrayIdentical(expected, actual)
actual = DataArray(Coordinate('foo', ['a', 'b']))
self.assertDataArrayIdentical(expected, actual)
s = pd.Series(range(2), pd.MultiIndex.from_product([['a', 'b'], [0]]))
with self.assertRaisesRegexp(NotImplementedError, 'MultiIndex'):
DataArray(s)
def test_constructor_from_0d(self):
expected = Dataset({None: ([], 0)})[None]
actual = DataArray(0)
self.assertDataArrayIdentical(expected, actual)
def test_equals_and_identical(self):
orig = DataArray(np.arange(5.0), {'a': 42}, dims='x')
expected = orig
actual = orig.copy()
self.assertTrue(expected.equals(actual))
self.assertTrue(expected.identical(actual))
actual = expected.rename('baz')
self.assertTrue(expected.equals(actual))
self.assertFalse(expected.identical(actual))
actual = expected.rename({'x': 'xxx'})
self.assertFalse(expected.equals(actual))
self.assertFalse(expected.identical(actual))
actual = expected.copy()
actual.attrs['foo'] = 'bar'
self.assertTrue(expected.equals(actual))
self.assertFalse(expected.identical(actual))
actual = expected.copy()
actual['x'] = ('x', -np.arange(5))
self.assertFalse(expected.equals(actual))
self.assertFalse(expected.identical(actual))
actual = expected.reset_coords(drop=True)
self.assertFalse(expected.equals(actual))
self.assertFalse(expected.identical(actual))
actual = orig.copy()
actual[0] = np.nan
expected = actual.copy()
self.assertTrue(expected.equals(actual))
self.assertTrue(expected.identical(actual))
actual[:] = np.nan
self.assertFalse(expected.equals(actual))
self.assertFalse(expected.identical(actual))
actual = expected.copy()
actual['a'] = 100000
self.assertFalse(expected.equals(actual))
self.assertFalse(expected.identical(actual))
def test_equals_failures(self):
orig = DataArray(np.arange(5.0), {'a': 42}, dims='x')
self.assertFalse(orig.equals(np.arange(5)))
self.assertFalse(orig.identical(123))
self.assertFalse(orig.broadcast_equals({1: 2}))
def test_broadcast_equals(self):
a = DataArray([0, 0], {'y': 0}, dims='x')
b = DataArray([0, 0], {'y': ('x', [0, 0])}, dims='x')
self.assertTrue(a.broadcast_equals(b))
self.assertTrue(b.broadcast_equals(a))
self.assertFalse(a.equals(b))
self.assertFalse(a.identical(b))
c = DataArray([0], coords={'x': 0}, dims='y')
self.assertFalse(a.broadcast_equals(c))
self.assertFalse(c.broadcast_equals(a))
def test_getitem(self):
# strings pull out dataarrays
self.assertDataArrayIdentical(self.dv, self.ds['foo'])
x = self.dv['x']
y = self.dv['y']
self.assertDataArrayIdentical(self.ds['x'], x)
self.assertDataArrayIdentical(self.ds['y'], y)
I = ReturnItem()
for i in [I[:], I[...], I[x.values], I[x.variable], I[x], I[x, y],
I[x.values > -1], I[x.variable > -1], I[x > -1],
I[x > -1, y > -1]]:
self.assertVariableEqual(self.dv, self.dv[i])
for i in [I[0], I[:, 0], I[:3, :2],
I[x.values[:3]], I[x.variable[:3]], I[x[:3]], I[x[:3], y[:4]],
I[x.values > 3], I[x.variable > 3], I[x > 3], I[x > 3, y > 3]]:
self.assertVariableEqual(self.v[i], self.dv[i])
def test_getitem_dict(self):
actual = self.dv[{'x': slice(3), 'y': 0}]
expected = self.dv.isel(x=slice(3), y=0)
self.assertDataArrayIdentical(expected, actual)
def test_getitem_coords(self):
orig = DataArray([[10], [20]],
{'x': [1, 2], 'y': [3], 'z': 4,
'x2': ('x', ['a', 'b']),
'y2': ('y', ['c']),
'xy': (['y', 'x'], [['d', 'e']])},
dims=['x', 'y'])
self.assertDataArrayIdentical(orig, orig[:])
self.assertDataArrayIdentical(orig, orig[:, :])
self.assertDataArrayIdentical(orig, orig[...])
self.assertDataArrayIdentical(orig, orig[:2, :1])
self.assertDataArrayIdentical(orig, orig[[0, 1], [0]])
actual = orig[0, 0]
expected = DataArray(
10, {'x': 1, 'y': 3, 'z': 4, 'x2': 'a', 'y2': 'c', 'xy': 'd'})
self.assertDataArrayIdentical(expected, actual)
actual = orig[0, :]
expected = DataArray(
[10], {'x': 1, 'y': [3], 'z': 4, 'x2': 'a', 'y2': ('y', ['c']),
'xy': ('y', ['d'])},
dims='y')
self.assertDataArrayIdentical(expected, actual)
actual = orig[:, 0]
expected = DataArray(
[10, 20], {'x': [1, 2], 'y': 3, 'z': 4, 'x2': ('x', ['a', 'b']),
'y2': 'c', 'xy': ('x', ['d', 'e'])},
dims='x')
self.assertDataArrayIdentical(expected, actual)
@requires_dask
def test_chunk(self):
unblocked = DataArray(np.ones((3, 4)))
self.assertIsNone(unblocked.chunks)
blocked = unblocked.chunk()
self.assertEqual(blocked.chunks, ((3,), (4,)))
blocked = unblocked.chunk(chunks=((2, 1), (2, 2)))
self.assertEqual(blocked.chunks, ((2, 1), (2, 2)))
blocked = unblocked.chunk(chunks=(3, 3))
self.assertEqual(blocked.chunks, ((3,), (3, 1)))
self.assertIsNone(blocked.load().chunks)
def test_isel(self):
self.assertDataArrayIdentical(self.dv[0], self.dv.isel(x=0))
self.assertDataArrayIdentical(self.dv, self.dv.isel(x=slice(None)))
self.assertDataArrayIdentical(self.dv[:3], self.dv.isel(x=slice(3)))
self.assertDataArrayIdentical(self.dv[:3, :5],
self.dv.isel(x=slice(3), y=slice(5)))
def test_sel(self):
self.ds['x'] = ('x', np.array(list('abcdefghij')))
da = self.ds['foo']
self.assertDataArrayIdentical(da, da.sel(x=slice(None)))
self.assertDataArrayIdentical(da[1], da.sel(x='b'))
self.assertDataArrayIdentical(da[:3], da.sel(x=slice('c')))
self.assertDataArrayIdentical(da[:3], da.sel(x=['a', 'b', 'c']))
self.assertDataArrayIdentical(da[:, :4], da.sel(y=(self.ds['y'] < 4)))
# verify that indexing with a dataarray works
b = DataArray('b')
self.assertDataArrayIdentical(da[1], da.sel(x=b))
self.assertDataArrayIdentical(da[[1]], da.sel(x=slice(b, b)))
def test_sel_method(self):
data = DataArray(np.random.randn(3, 4),
[('x', [0, 1, 2]), ('y', list('abcd'))])
expected = data.sel(y=['a', 'b'])
actual = data.sel(y=['ab', 'ba'], method='pad')
self.assertDataArrayIdentical(expected, actual)
expected = data.sel(x=[1, 2])
actual = data.sel(x=[0.9, 1.9], method='backfill')
self.assertDataArrayIdentical(expected, actual)
def test_isel_points(self):
shape = (10, 5, 6)
np_array = np.random.random(shape)
da = DataArray(np_array, dims=['time', 'y', 'x'])
y = [1, 3]
x = [3, 0]
expected = da.values[:, y, x]
actual = da.isel_points(y=y, x=x, dim='test_coord')
assert 'test_coord' in actual.coords
assert actual.coords['test_coord'].shape == (len(y), )
assert all(x in actual for x in ['time', 'x', 'y', 'test_coord'])
assert actual.dims == ('test_coord', 'time')
actual = da.isel_points(y=y, x=x)
assert 'points' in actual.coords
        # Note that because xray always concatenates along the first dimension,
        # we must transpose the result to match the numpy style of
        # concatenation.
np.testing.assert_equal(actual.T, expected)
# a few corner cases
da.isel_points(time=[1, 2], x=[2, 2], y=[3, 4])
np.testing.assert_allclose(
da.isel_points(time=[1], x=[2], y=[4]).values.squeeze(),
np_array[1, 4, 2].squeeze())
da.isel_points(time=[1, 2])
y = [-1, 0]
x = [-2, 2]
expected = da.values[:, y, x]
actual = da.isel_points(x=x, y=y).values
np.testing.assert_equal(actual.T, expected)
# test that the order of the indexers doesn't matter
self.assertDataArrayIdentical(
da.isel_points(y=y, x=x),
da.isel_points(x=x, y=y))
# make sure we're raising errors in the right places
with self.assertRaisesRegexp(ValueError,
'All indexers must be the same length'):
da.isel_points(y=[1, 2], x=[1, 2, 3])
with self.assertRaisesRegexp(ValueError,
'dimension bad_key does not exist'):
da.isel_points(bad_key=[1, 2])
with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
da.isel_points(y=[1.5, 2.2])
with self.assertRaisesRegexp(TypeError, 'Indexers must be integers'):
da.isel_points(x=[1, 2, 3], y=slice(3))
with self.assertRaisesRegexp(ValueError,
'Indexers must be 1 dimensional'):
da.isel_points(y=1, x=2)
with self.assertRaisesRegexp(ValueError,
'Existing dimension names are not'):
da.isel_points(y=[1, 2], x=[1, 2], dim='x')
# using non string dims
actual = da.isel_points(y=[1, 2], x=[1, 2], dim=['A', 'B'])
assert 'points' in actual.coords
    def test_sel_points(self):
shape = (10, 5, 6)
np_array = np.random.random(shape)
da = DataArray(np_array, dims=['time', 'y', 'x'])
y = [1, 3]
x = [3, 0]
expected = da.isel_points(x=x, y=y)
actual = da.sel_points(x=x, y=y)
self.assertDataArrayIdentical(expected, actual)
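        # sel_points is the label-based counterpart of isel_points; with the
        # default integer coordinates used here (no explicit coords were given)
        # labels coincide with positions, so the two select the same points,
        # which is what the identity assertion above checks.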
def test_loc(self):
self.ds['x'] = ('x', np.array(list('abcdefghij')))
da = self.ds['foo']
self.assertDataArrayIdentical(da[:3], da.loc[:'c'])
self.assertDataArrayIdentical(da[1], da.loc['b'])
self.assertDataArrayIdentical(da[1], da.loc[{'x': 'b'}])
self.assertDataArrayIdentical(da[1], da.loc['b', ...])
self.assertDataArrayIdentical(da[:3], da.loc[['a', 'b', 'c']])
self.assertDataArrayIdentical(da[:3, :4],
da.loc[['a', 'b', 'c'], np.arange(4)])
self.assertDataArrayIdentical(da[:, :4], da.loc[:, self.ds['y'] < 4])
da.loc['a':'j'] = 0
self.assertTrue(np.all(da.values == 0))
da.loc[{'x': slice('a', 'j')}] = 2
self.assertTrue(np.all(da.values == 2))
def test_loc_single_boolean(self):
data = DataArray([0, 1], coords=[[True, False]])
self.assertEqual(data.loc[True], 0)
self.assertEqual(data.loc[False], 1)
def test_time_components(self):
dates = pd.date_range('2000-01-01', periods=10)
da = DataArray(np.arange(1, 11), [('time', dates)])
self.assertArrayEqual(da['time.dayofyear'], da.values)
self.assertArrayEqual(da.coords['time.dayofyear'], da.values)
def test_coords(self):
# use int64 to ensure repr() consistency on windows
coords = [Coordinate('x', np.array([-1, -2], 'int64')),
Coordinate('y', np.array([0, 1, 2], 'int64'))]
da = DataArray(np.random.randn(2, 3), coords, name='foo')
self.assertEquals(2, len(da.coords))
self.assertEqual(['x', 'y'], list(da.coords))
self.assertTrue(coords[0].identical(da.coords['x']))
self.assertTrue(coords[1].identical(da.coords['y']))
self.assertIn('x', da.coords)
self.assertNotIn(0, da.coords)
self.assertNotIn('foo', da.coords)
with self.assertRaises(KeyError):
da.coords[0]
with self.assertRaises(KeyError):
da.coords['foo']
expected = dedent("""\
Coordinates:
* x (x) int64 -1 -2
* y (y) int64 0 1 2""")
actual = repr(da.coords)
self.assertEquals(expected, actual)
def test_coord_coords(self):
orig = DataArray([10, 20],
{'x': [1, 2], 'x2': ('x', ['a', 'b']), 'z': 4},
dims='x')
actual = orig.coords['x']
expected = DataArray([1, 2], {'z': 4, 'x2': ('x', ['a', 'b'])},
dims='x', name='x')
self.assertDataArrayIdentical(expected, actual)
del actual.coords['x2']
self.assertDataArrayIdentical(
expected.reset_coords('x2', drop=True), actual)
actual.coords['x3'] = ('x', ['a', 'b'])
expected = DataArray([1, 2], {'z': 4, 'x3': ('x', ['a', 'b'])},
dims='x', name='x')
self.assertDataArrayIdentical(expected, actual)
def test_reset_coords(self):
data = DataArray(np.zeros((3, 4)),
{'bar': ('x', ['a', 'b', 'c']),
'baz': ('y', range(4))},
dims=['x', 'y'],
name='foo')
actual = data.reset_coords()
expected = Dataset({'foo': (['x', 'y'], np.zeros((3, 4))),
'bar': ('x', ['a', 'b', 'c']),
'baz': ('y', range(4))})
self.assertDatasetIdentical(actual, expected)
actual = data.reset_coords(['bar', 'baz'])
self.assertDatasetIdentical(actual, expected)
actual = data.reset_coords('bar')
expected = Dataset({'foo': (['x', 'y'], np.zeros((3, 4))),
'bar': ('x', ['a', 'b', 'c'])},
{'baz': ('y', range(4))})
self.assertDatasetIdentical(actual, expected)
actual = data.reset_coords(['bar'])
self.assertDatasetIdentical(actual, expected)
actual = data.reset_coords(drop=True)
expected = DataArray(np.zeros((3, 4)), dims=['x', 'y'], name='foo')
self.assertDataArrayIdentical(actual, expected)
actual = data.copy()
actual.reset_coords(drop=True, inplace=True)
self.assertDataArrayIdentical(actual, expected)
actual = data.reset_coords('bar', drop=True)
expected = DataArray(np.zeros((3, 4)), {'baz': ('y', range(4))},
dims=['x', 'y'], name='foo')
self.assertDataArrayIdentical(actual, expected)
with self.assertRaisesRegexp(ValueError, 'cannot reset coord'):
data.reset_coords(inplace=True)
with self.assertRaises(KeyError):
data.reset_coords('foo', drop=True)
with self.assertRaisesRegexp(ValueError, 'cannot be found'):
data.reset_coords('not_found')
with self.assertRaisesRegexp(ValueError, 'cannot remove index'):
data.reset_coords('y')
def test_assign_coords(self):
array = DataArray(10)
actual = array.assign_coords(c=42)
expected = DataArray(10, {'c': 42})
self.assertDataArrayIdentical(actual, expected)
array = DataArray([1, 2, 3, 4], {'c': ('x', [0, 0, 1, 1])}, dims='x')
actual = array.groupby('c').assign_coords(d=lambda a: a.mean())
expected = array.copy()
expected.coords['d'] = ('x', [1.5, 1.5, 3.5, 3.5])
self.assertDataArrayIdentical(actual, expected)
def test_reindex(self):
foo = self.dv
bar = self.dv[:2, :2]
self.assertDataArrayIdentical(foo.reindex_like(bar), bar)
expected = foo.copy()
expected[:] = np.nan
expected[:2, :2] = bar
self.assertDataArrayIdentical(bar.reindex_like(foo), expected)
# regression test for #279
expected = DataArray(np.random.randn(5), dims=["time"])
time2 = DataArray(np.arange(5), dims="time2")
actual = expected.reindex(time=time2)
self.assertDataArrayIdentical(actual, expected)
def test_reindex_method(self):
x = DataArray([10, 20], dims='y')
y = [-0.5, 0.5, 1.5]
actual = x.reindex(y=y, method='backfill')
expected = DataArray([10, 20, np.nan], coords=[('y', y)])
self.assertDataArrayIdentical(expected, actual)
alt = Dataset({'y': y})
actual = x.reindex_like(alt, method='backfill')
self.assertDatasetIdentical(expected, actual)
def test_rename(self):
renamed = self.dv.rename('bar')
self.assertDatasetIdentical(
renamed.to_dataset(), self.ds.rename({'foo': 'bar'}))
self.assertEqual(renamed.name, 'bar')
renamed = self.dv.rename({'foo': 'bar'})
self.assertDatasetIdentical(
renamed.to_dataset(), self.ds.rename({'foo': 'bar'}))
self.assertEqual(renamed.name, 'bar')
def test_swap_dims(self):
array = DataArray(np.random.randn(3), {'y': ('x', list('abc'))}, 'x')
expected = DataArray(array.values,
{'y': list('abc'), 'x': ('y', range(3))},
dims='y')
actual = array.swap_dims({'x': 'y'})
self.assertDataArrayIdentical(expected, actual)
def test_dataset_getitem(self):
dv = self.ds['foo']
self.assertDataArrayIdentical(dv, self.dv)
def test_array_interface(self):
self.assertArrayEqual(np.asarray(self.dv), self.x)
# test patched in methods
self.assertArrayEqual(self.dv.astype(float), self.v.astype(float))
self.assertVariableEqual(self.dv.argsort(), self.v.argsort())
self.assertVariableEqual(self.dv.clip(2, 3), self.v.clip(2, 3))
# test ufuncs
expected = deepcopy(self.ds)
expected['foo'][:] = np.sin(self.x)
self.assertDataArrayEqual(expected['foo'], np.sin(self.dv))
self.assertDataArrayEqual(self.dv, np.maximum(self.v, self.dv))
bar = Variable(['x', 'y'], np.zeros((10, 20)))
self.assertDataArrayEqual(self.dv, np.maximum(self.dv, bar))
def test_is_null(self):
x = np.random.RandomState(42).randn(5, 6)
x[x < 0] = np.nan
original = DataArray(x, [-np.arange(5), np.arange(6)], ['x', 'y'])
expected = DataArray(pd.isnull(x), [-np.arange(5), np.arange(6)],
['x', 'y'])
self.assertDataArrayIdentical(expected, original.isnull())
self.assertDataArrayIdentical(~expected, original.notnull())
def test_math(self):
x = self.x
v = self.v
a = self.dv
# variable math was already tested extensively, so let's just make sure
# that all types are properly converted here
self.assertDataArrayEqual(a, +a)
self.assertDataArrayEqual(a, a + 0)
self.assertDataArrayEqual(a, 0 + a)
self.assertDataArrayEqual(a, a + 0 * v)
self.assertDataArrayEqual(a, 0 * v + a)
self.assertDataArrayEqual(a, a + 0 * x)
self.assertDataArrayEqual(a, 0 * x + a)
self.assertDataArrayEqual(a, a + 0 * a)
self.assertDataArrayEqual(a, 0 * a + a)
def test_math_automatic_alignment(self):
a = DataArray(range(5), [('x', range(5))])
b = DataArray(range(5), [('x', range(1, 6))])
expected = DataArray(np.ones(4), [('x', [1, 2, 3, 4])])
self.assertDataArrayIdentical(a - b, expected)
with self.assertRaisesRegexp(ValueError, 'no overlapping labels'):
a.isel(x=slice(2)) + a.isel(x=slice(2, None))
def test_inplace_math_basics(self):
x = self.x
v = self.v
a = self.dv
b = a
b += 1
self.assertIs(b, a)
self.assertIs(b.variable, v)
self.assertArrayEqual(b.values, x)
self.assertIs(source_ndarray(b.values), x)
self.assertDatasetIdentical(b._dataset, self.ds)
def test_inplace_math_automatic_alignment(self):
a = DataArray(range(5), [('x', range(5))])
b = DataArray(range(1, 6), [('x', range(1, 6))])
with self.assertRaisesRegexp(ValueError, 'not aligned'):
a += b
with self.assertRaisesRegexp(ValueError, 'not aligned'):
b += a
def test_math_name(self):
# Verify that name is preserved only when it can be done unambiguously.
# The rule (copied from pandas.Series) is keep the current name only if
# the other object has the same name or no name attribute and this
# object isn't a coordinate; otherwise reset to None.
a = self.dv
self.assertEqual((+a).name, 'foo')
self.assertEqual((a + 0).name, 'foo')
self.assertIs((a + a.rename(None)).name, None)
self.assertIs((a + a.rename('bar')).name, None)
self.assertEqual((a + a).name, 'foo')
self.assertIs((+a['x']).name, None)
self.assertIs((a['x'] + 0).name, None)
self.assertIs((a + a['x']).name, None)
def test_math_with_coords(self):
coords = {'x': [-1, -2], 'y': ['ab', 'cd', 'ef'],
'lat': (['x', 'y'], [[1, 2, 3], [-1, -2, -3]]),
'c': -999}
orig = DataArray(np.random.randn(2, 3), coords, dims=['x', 'y'])
actual = orig + 1
expected = DataArray(orig.values + 1, orig.coords)
self.assertDataArrayIdentical(expected, actual)
actual = 1 + orig
self.assertDataArrayIdentical(expected, actual)
actual = orig + orig[0, 0]
exp_coords = dict((k, v) for k, v in coords.items() if k != 'lat')
expected = DataArray(orig.values + orig.values[0, 0],
exp_coords, dims=['x', 'y'])
self.assertDataArrayIdentical(expected, actual)
actual = orig[0, 0] + orig
self.assertDataArrayIdentical(expected, actual)
actual = orig[0, 0] + orig[-1, -1]
expected = DataArray(orig.values[0, 0] + orig.values[-1, -1],
{'c': -999})
self.assertDataArrayIdentical(expected, actual)
actual = orig[:, 0] + orig[0, :]
exp_values = orig[:, 0].values[:, None] + orig[0, :].values[None, :]
expected = DataArray(exp_values, exp_coords, dims=['x', 'y'])
self.assertDataArrayIdentical(expected, actual)
actual = orig[0, :] + orig[:, 0]
self.assertDataArrayIdentical(expected.T, actual)
actual = orig - orig.T
expected = DataArray(np.zeros((2, 3)), orig.coords)
self.assertDataArrayIdentical(expected, actual)
actual = orig.T - orig
self.assertDataArrayIdentical(expected.T, actual)
alt = DataArray([1, 1], {'x': [-1, -2], 'c': 'foo', 'd': 555}, 'x')
actual = orig + alt
expected = orig + 1
expected.coords['d'] = 555
del expected.coords['c']
self.assertDataArrayIdentical(expected, actual)
actual = alt + orig
self.assertDataArrayIdentical(expected, actual)
def test_index_math(self):
orig = DataArray(range(3), dims='x', name='x')
actual = orig + 1
expected = DataArray(1 + np.arange(3), coords=[('x', range(3))])
self.assertDataArrayIdentical(expected, actual)
# regression tests for #254
actual = orig[0] < orig
expected = DataArray([False, True, True], coords=[('x', range(3))])
self.assertDataArrayIdentical(expected, actual)
actual = orig > orig[0]
self.assertDataArrayIdentical(expected, actual)
def test_dataset_math(self):
# more comprehensive tests with multiple dataset variables
obs = Dataset({'tmin': ('x', np.arange(5)),
'tmax': ('x', 10 + np.arange(5))},
{'x': ('x', 0.5 * np.arange(5)),
'loc': ('x', range(-2, 3))})
actual = 2 * obs['tmax']
expected = DataArray(2 * (10 + np.arange(5)), obs.coords, name='tmax')
self.assertDataArrayIdentical(actual, expected)
actual = obs['tmax'] - obs['tmin']
expected = DataArray(10 * np.ones(5), obs.coords)
self.assertDataArrayIdentical(actual, expected)
sim = Dataset({'tmin': ('x', 1 + np.arange(5)),
'tmax': ('x', 11 + np.arange(5)),
# does *not* include 'loc' as a coordinate
'x': ('x', 0.5 * np.arange(5))})
actual = sim['tmin'] - obs['tmin']
expected = DataArray(np.ones(5), obs.coords, name='tmin')
self.assertDataArrayIdentical(actual, expected)
actual = -obs['tmin'] + sim['tmin']
self.assertDataArrayIdentical(actual, expected)
actual = sim['tmin'].copy()
actual -= obs['tmin']
self.assertDataArrayIdentical(actual, expected)
actual = sim.copy()
actual['tmin'] = sim['tmin'] - obs['tmin']
expected = Dataset({'tmin': ('x', np.ones(5)),
'tmax': ('x', sim['tmax'].values)},
obs.coords)
self.assertDatasetIdentical(actual, expected)
actual = sim.copy()
actual['tmin'] -= obs['tmin']
self.assertDatasetIdentical(actual, expected)
def test_transpose(self):
self.assertVariableEqual(self.dv.variable.transpose(),
self.dv.transpose())
def test_squeeze(self):
self.assertVariableEqual(self.dv.variable.squeeze(), self.dv.squeeze())
def test_drop_coordinates(self):
expected = DataArray(np.random.randn(2, 3), dims=['x', 'y'])
arr = expected.copy()
arr.coords['z'] = 2
actual = arr.drop('z')
self.assertDataArrayIdentical(expected, actual)
with self.assertRaises(ValueError):
arr.drop('not found')
with self.assertRaisesRegexp(ValueError, 'cannot drop'):
arr.drop(None)
renamed = arr.rename('foo')
with self.assertRaisesRegexp(ValueError, 'cannot drop'):
renamed.drop('foo')
def test_drop_index_labels(self):
arr = DataArray(np.random.randn(2, 3), dims=['x', 'y'])
actual = arr.drop([0, 1], dim='y')
expected = arr[:, 2:]
self.assertDataArrayIdentical(expected, actual)
def test_dropna(self):
x = np.random.randn(4, 4)
x[::2, 0] = np.nan
arr = DataArray(x, dims=['a', 'b'])
actual = arr.dropna('a')
expected = arr[1::2]
self.assertDataArrayIdentical(actual, expected)
actual = arr.dropna('b', how='all')
self.assertDataArrayIdentical(actual, arr)
actual = arr.dropna('a', thresh=1)
self.assertDataArrayIdentical(actual, arr)
actual = arr.dropna('b', thresh=3)
expected = arr[:, 1:]
self.assertDataArrayIdentical(actual, expected)
def test_reduce(self):
coords = {'x': [-1, -2], 'y': ['ab', 'cd', 'ef'],
'lat': (['x', 'y'], [[1, 2, 3], [-1, -2, -3]]),
'c': -999}
orig = DataArray([[-1, 0, 1], [-3, 0, 3]], coords, dims=['x', 'y'])
actual = orig.mean()
expected = DataArray(0, {'c': -999})
self.assertDataArrayIdentical(expected, actual)
actual = orig.mean(['x', 'y'])
self.assertDataArrayIdentical(expected, actual)
actual = orig.mean('x')
expected = DataArray([-2, 0, 2], {'y': coords['y'], 'c': -999}, 'y')
self.assertDataArrayIdentical(expected, actual)
actual = orig.mean(['x'])
self.assertDataArrayIdentical(expected, actual)
actual = orig.mean('y')
expected = DataArray([0, 0], {'x': coords['x'], 'c': -999}, 'x')
self.assertDataArrayIdentical(expected, actual)
self.assertVariableEqual(self.dv.reduce(np.mean, 'x'),
self.v.reduce(np.mean, 'x'))
orig = DataArray([[1, 0, np.nan], [3, 0, 3]], coords, dims=['x', 'y'])
actual = orig.count()
expected = DataArray(5, {'c': -999})
self.assertDataArrayIdentical(expected, actual)
def test_reduce_keep_attrs(self):
# Test dropped attrs
vm = self.va.mean()
self.assertEqual(len(vm.attrs), 0)
self.assertEqual(vm.attrs, OrderedDict())
# Test kept attrs
vm = self.va.mean(keep_attrs=True)
self.assertEqual(len(vm.attrs), len(self.attrs))
self.assertEqual(vm.attrs, self.attrs)
def test_fillna(self):
a = DataArray([np.nan, 1, np.nan, 3], dims='x')
actual = a.fillna(-1)
expected = DataArray([-1, 1, -1, 3], dims='x')
self.assertDataArrayIdentical(expected, actual)
b = DataArray(range(4), dims='x')
actual = a.fillna(b)
expected = b.copy()
self.assertDataArrayIdentical(expected, actual)
actual = a.fillna(range(4))
self.assertDataArrayIdentical(expected, actual)
actual = a.fillna(b[:3])
self.assertDataArrayIdentical(expected, actual)
actual = a.fillna(b[:0])
self.assertDataArrayIdentical(a, actual)
with self.assertRaisesRegexp(TypeError, 'fillna on a DataArray'):
a.fillna({0: 0})
with self.assertRaisesRegexp(ValueError, 'broadcast'):
a.fillna([1, 2])
fill_value = DataArray([0, 1], dims='y')
actual = a.fillna(fill_value)
expected = DataArray([[0, 1], [1, 1], [0, 1], [3, 3]], dims=('x', 'y'))
self.assertDataArrayIdentical(expected, actual)
expected = b.copy()
for target in [a, expected]:
target.coords['b'] = ('x', [0, 0, 1, 1])
actual = a.groupby('b').fillna(DataArray([0, 2], dims='b'))
self.assertDataArrayIdentical(expected, actual)
def test_groupby_iter(self):
for ((act_x, act_dv), (exp_x, exp_ds)) in \
zip(self.dv.groupby('y'), self.ds.groupby('y')):
self.assertEqual(exp_x, act_x)
self.assertDataArrayIdentical(exp_ds['foo'], act_dv)
for ((_, exp_dv), act_dv) in zip(self.dv.groupby('x'), self.dv):
self.assertDataArrayIdentical(exp_dv, act_dv)
def make_groupby_example_array(self):
da = self.dv.copy()
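# 20 'y' labels below: nine 'a', a single 'c', then ten 'b' (so the three groups do not appear in sorted order)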
da.coords['abc'] = ('y', np.array(['a'] * 9 + ['c'] + ['b'] * 10))
da.coords['y'] = 20 + 100 * da['y']
return da
def test_groupby_properties(self):
grouped = self.make_groupby_example_array().groupby('abc')
expected_unique = Variable('abc', ['a', 'b', 'c'])
self.assertVariableEqual(expected_unique, grouped.unique_coord)
self.assertEqual(3, len(grouped))
def test_groupby_apply_identity(self):
expected = self.make_groupby_example_array()
idx = expected.coords['y']
identity = lambda x: x
for g in ['x', 'y', 'abc', idx]:
for shortcut in [False, True]:
for squeeze in [False, True]:
grouped = expected.groupby(g, squeeze=squeeze)
actual = grouped.apply(identity, shortcut=shortcut)
self.assertDataArrayIdentical(expected, actual)
def test_groupby_sum(self):
array = self.make_groupby_example_array()
grouped = array.groupby('abc')
expected_sum_all = Dataset(
{'foo': Variable(['abc'], np.array([self.x[:, :9].sum(),
self.x[:, 10:].sum(),
self.x[:, 9:10].sum()]).T),
'abc': Variable(['abc'], np.array(['a', 'b', 'c']))})['foo']
self.assertDataArrayAllClose(expected_sum_all, grouped.reduce(np.sum))
self.assertDataArrayAllClose(expected_sum_all, grouped.sum())
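# the slices below pick out the 'a' (y[:9]), 'b' (y[10:]) and 'c' (y[9:10]) groups, in that order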
expected = DataArray([array['y'].values[idx].sum() for idx
in [slice(9), slice(10, None), slice(9, 10)]],
[['a', 'b', 'c']], ['abc'])
actual = array['y'].groupby('abc').apply(np.sum)
self.assertDataArrayAllClose(expected, actual)
actual = array['y'].groupby('abc').sum()
self.assertDataArrayAllClose(expected, actual)
expected_sum_axis1 = Dataset(
{'foo': (['x', 'abc'], np.array([self.x[:, :9].sum(1),
self.x[:, 10:].sum(1),
self.x[:, 9:10].sum(1)]).T),
'x': self.ds['x'],
'abc': Variable(['abc'], np.array(['a', 'b', 'c']))})['foo']
self.assertDataArrayAllClose(expected_sum_axis1,
grouped.reduce(np.sum, 'y'))
self.assertDataArrayAllClose(expected_sum_axis1, grouped.sum('y'))
def test_groupby_count(self):
array = DataArray([0, 0, np.nan, np.nan, 0, 0],
coords={'cat': ('x', ['a', 'b', 'b', 'c', 'c', 'c'])},
dims='x')
actual = array.groupby('cat').count()
expected = DataArray([1, 1, 2], coords=[('cat', ['a', 'b', 'c'])])
self.assertDataArrayIdentical(actual, expected)
@unittest.skip('needs to be fixed for shortcut=False, keep_attrs=False')
def test_groupby_reduce_attrs(self):
array = self.make_groupby_example_array()
array.attrs['foo'] = 'bar'
for shortcut in [True, False]:
for keep_attrs in [True, False]:
print('shortcut=%s, keep_attrs=%s' % (shortcut, keep_attrs))
actual = array.groupby('abc').reduce(
np.mean, keep_attrs=keep_attrs, shortcut=shortcut)
expected = array.groupby('abc').mean()
if keep_attrs:
expected.attrs['foo'] = 'bar'
self.assertDataArrayIdentical(expected, actual)
def test_groupby_apply_center(self):
def center(x):
return x - np.mean(x)
array = self.make_groupby_example_array()
grouped = array.groupby('abc')
expected_ds = array.to_dataset()
exp_data = np.hstack([center(self.x[:, :9]),
center(self.x[:, 9:10]),
center(self.x[:, 10:])])
expected_ds['foo'] = (['x', 'y'], exp_data)
expected_centered = expected_ds['foo']
self.assertDataArrayAllClose(expected_centered, grouped.apply(center))
def test_groupby_apply_ndarray(self):
# regression test for #326
array = self.make_groupby_example_array()
grouped = array.groupby('abc')
actual = grouped.apply(np.asarray)
self.assertDataArrayEqual(array, actual)
def test_groupby_apply_changes_metadata(self):
def change_metadata(x):
x.coords['x'] = x.coords['x'] * 2
x.attrs['fruit'] = 'lemon'
return x
array = self.make_groupby_example_array()
grouped = array.groupby('abc')
actual = grouped.apply(change_metadata)
expected = array.copy()
expected = change_metadata(expected)
self.assertDataArrayEqual(expected, actual)
def test_groupby_math(self):
array = self.make_groupby_example_array()
for squeeze in [True, False]:
grouped = array.groupby('x', squeeze=squeeze)
expected = array + array.coords['x']
actual = grouped + array.coords['x']
self.assertDataArrayIdentical(expected, actual)
actual = array.coords['x'] + grouped
self.assertDataArrayIdentical(expected, actual)
ds = array.coords['x'].to_dataset()
expected = array + ds
actual = grouped + ds
self.assertDatasetIdentical(expected, actual)
actual = ds + grouped
self.assertDatasetIdentical(expected, actual)
grouped = array.groupby('abc')
expected_agg = (grouped.mean() - np.arange(3)).rename(None)
actual = grouped - DataArray(range(3), [('abc', ['a', 'b', 'c'])])
actual_agg = actual.groupby('abc').mean()
self.assertDataArrayAllClose(expected_agg, actual_agg)
with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
grouped + 1
with self.assertRaisesRegexp(TypeError, 'only support binary ops'):
grouped + grouped
with self.assertRaisesRegexp(TypeError, 'in-place operations'):
array += grouped
def test_groupby_math_not_aligned(self):
array = DataArray(range(4), {'b': ('x', [0, 0, 1, 1])}, dims='x')
other = DataArray([10], dims='b')
actual = array.groupby('b') + other
expected = DataArray([10, 11, np.nan, np.nan], array.coords)
self.assertDataArrayIdentical(expected, actual)
other = DataArray([10], coords={'c': 123}, dims='b')
actual = array.groupby('b') + other
expected.coords['c'] = (['x'], [123] * 2 + [np.nan] * 2)
self.assertDataArrayIdentical(expected, actual)
other = Dataset({'a': ('b', [10])})
actual = array.groupby('b') + other
expected = Dataset({'a': ('x', [10, 11, np.nan, np.nan])},
array.coords)
self.assertDatasetIdentical(expected, actual)
def test_groupby_restore_dim_order(self):
array = DataArray(np.random.randn(5, 3),
coords={'a': ('x', range(5)), 'b': ('y', range(3))},
dims=['x', 'y'])
for by, expected_dims in [('x', ('x', 'y')),
('y', ('x', 'y')),
('a', ('a', 'y')),
('b', ('x', 'b'))]:
result = array.groupby(by).apply(lambda x: x.squeeze())
self.assertEqual(result.dims, expected_dims)
def test_groupby_first_and_last(self):
array = DataArray([1, 2, 3, 4, 5], dims='x')
by = DataArray(['a'] * 2 + ['b'] * 3, dims='x', name='ab')
expected = DataArray([1, 3], [('ab', ['a', 'b'])])
actual = array.groupby(by).first()
self.assertDataArrayIdentical(expected, actual)
expected = DataArray([2, 5], [('ab', ['a', 'b'])])
actual = array.groupby(by).last()
self.assertDataArrayIdentical(expected, actual)
array = DataArray(np.random.randn(5, 3), dims=['x', 'y'])
expected = DataArray(array[[0, 2]], {'ab': ['a', 'b']}, ['ab', 'y'])
actual = array.groupby(by).first()
self.assertDataArrayIdentical(expected, actual)
actual = array.groupby('x').first()
expected = array # should be a no-op
self.assertDataArrayIdentical(expected, actual)
def test_resample(self):
times = pd.date_range('2000-01-01', freq='6H', periods=10)
array = DataArray(np.arange(10), [('time', times)])
actual = array.resample('6H', dim='time')
self.assertDataArrayIdentical(array, actual)
actual = array.resample('24H', dim='time')
expected = DataArray(array.to_series().resample('24H'))
self.assertDataArrayIdentical(expected, actual)
actual = array.resample('24H', dim='time', how=np.mean)
self.assertDataArrayIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'index must be monotonic'):
array[[2, 0, 1]].resample('1D', dim='time')
def test_resample_first(self):
times = pd.date_range('2000-01-01', freq='6H', periods=10)
array = DataArray(np.arange(10), [('time', times)])
actual = array.resample('1D', dim='time', how='first')
expected = DataArray([0, 4, 8], [('time', times[::4])])
self.assertDataArrayIdentical(expected, actual)
# verify that labels don't use the first value
actual = array.resample('24H', dim='time', how='first')
expected = DataArray(array.to_series().resample('24H', how='first'))
self.assertDataArrayIdentical(expected, actual)
# missing values
array = array.astype(float)
array[:2] = np.nan
actual = array.resample('1D', dim='time', how='first')
expected = DataArray([2, 4, 8], [('time', times[::4])])
self.assertDataArrayIdentical(expected, actual)
actual = array.resample('1D', dim='time', how='first', skipna=False)
expected = DataArray([np.nan, 4, 8], [('time', times[::4])])
self.assertDataArrayIdentical(expected, actual)
def test_resample_skipna(self):
times = pd.date_range('2000-01-01', freq='6H', periods=10)
array = DataArray(np.ones(10), [('time', times)])
array[1] = np.nan
actual = array.resample('1D', dim='time', skipna=False)
expected = DataArray([np.nan, 1, 1], [('time', times[::4])])
self.assertDataArrayIdentical(expected, actual)
def test_resample_upsampling(self):
times = pd.date_range('2000-01-01', freq='1D', periods=5)
array = DataArray(np.arange(5), [('time', times)])
expected_time = pd.date_range('2000-01-01', freq='12H', periods=9)
expected = array.reindex(time=expected_time)
for how in ['mean', 'median', 'sum', 'first', 'last', np.mean]:
actual = array.resample('12H', 'time', how=how)
self.assertDataArrayIdentical(expected, actual)
def test_align(self):
self.ds['x'] = ('x', np.array(list('abcdefghij')))
dv1, dv2 = align(self.dv, self.dv[:5], join='inner')
self.assertDataArrayIdentical(dv1, self.dv[:5])
self.assertDataArrayIdentical(dv2, self.dv[:5])
def test_align_dtype(self):
# regression test for #264
x1 = np.arange(30)
x2 = np.arange(5, 35)
a = DataArray(np.random.random((30,)).astype(np.float32), {'x': x1})
b = DataArray(np.random.random((30,)).astype(np.float32), {'x': x2})
c, d = align(a, b, join='outer')
self.assertEqual(c.dtype, np.float32)
def test_broadcast_arrays(self):
x = DataArray([1, 2], coords=[('a', [-1, -2])], name='x')
y = DataArray([1, 2], coords=[('b', [3, 4])], name='y')
x2, y2 = broadcast_arrays(x, y)
expected_coords = [('a', [-1, -2]), ('b', [3, 4])]
expected_x2 = DataArray([[1, 1], [2, 2]], expected_coords, name='x')
expected_y2 = DataArray([[1, 2], [1, 2]], expected_coords, name='y')
self.assertDataArrayIdentical(expected_x2, x2)
self.assertDataArrayIdentical(expected_y2, y2)
x = DataArray(np.random.randn(2, 3), dims=['a', 'b'])
y = DataArray(np.random.randn(3, 2), dims=['b', 'a'])
x2, y2 = broadcast_arrays(x, y)
expected_x2 = x
expected_y2 = y.T
self.assertDataArrayIdentical(expected_x2, x2)
self.assertDataArrayIdentical(expected_y2, y2)
with self.assertRaisesRegexp(ValueError, 'cannot broadcast'):
z = DataArray([1, 2], coords=[('a', [-10, 20])])
broadcast_arrays(x, z)
def test_to_pandas(self):
# 0d
actual = DataArray(42).to_pandas()
expected = np.array(42)
self.assertArrayEqual(actual, expected)
# 1d
values = np.random.randn(3)
index = pd.Index(['a', 'b', 'c'], name='x')
da = DataArray(values, coords=[index])
actual = da.to_pandas()
self.assertArrayEqual(actual.values, values)
self.assertArrayEqual(actual.index, index)
self.assertArrayEqual(actual.index.name, 'x')
# 2d
values = np.random.randn(3, 2)
da = DataArray(values, coords=[('x', ['a', 'b', 'c']), ('y', [0, 1])],
name='foo')
actual = da.to_pandas()
self.assertArrayEqual(actual.values, values)
self.assertArrayEqual(actual.index, ['a', 'b', 'c'])
self.assertArrayEqual(actual.columns, [0, 1])
# roundtrips
for shape in [(3,), (3, 4), (3, 4, 5)]:
dims = list('abc')[:len(shape)]
da = DataArray(np.random.randn(*shape), dims=dims)
roundtripped = DataArray(da.to_pandas())
self.assertDataArrayIdentical(da, roundtripped)
with self.assertRaisesRegexp(ValueError, 'cannot convert'):
DataArray(np.random.randn(1, 2, 3, 4, 5)).to_pandas()
def test_to_dataframe(self):
# regression test for #260
arr = DataArray(np.random.randn(3, 4),
[('B', [1, 2, 3]), ('A', list('cdef'))])
expected = arr.to_series()
actual = arr.to_dataframe()[None]
self.assertArrayEqual(expected.values, actual.values)
self.assertArrayEqual(expected.name, actual.name)
self.assertArrayEqual(expected.index.values, actual.index.values)
# regression test for coords with different dimensions
arr.coords['C'] = ('B', [-1, -2, -3])
expected = arr.to_series().to_frame()
expected['C'] = [-1] * 4 + [-2] * 4 + [-3] * 4
expected.columns = [None, 'C']
actual = arr.to_dataframe()
self.assertArrayEqual(expected.values, actual.values)
self.assertArrayEqual(expected.columns.values, actual.columns.values)
self.assertArrayEqual(expected.index.values, actual.index.values)
def test_to_and_from_series(self):
expected = self.dv.to_dataframe()['foo']
actual = self.dv.to_series()
self.assertArrayEqual(expected.values, actual.values)
self.assertArrayEqual(expected.index.values, actual.index.values)
self.assertEqual('foo', actual.name)
# test roundtrip
self.assertDataArrayIdentical(self.dv, DataArray.from_series(actual))
# test name is None
actual.name = None
expected_da = self.dv.rename(None)
self.assertDataArrayIdentical(expected_da,
DataArray.from_series(actual))
def test_to_masked_array(self):
rs = np.random.RandomState(44)
x = rs.random_sample(size=(10, 20))
x_masked = np.ma.masked_where(x < 0.5, x)
da = DataArray(x_masked)
# Test round trip
x_masked_2 = da.to_masked_array()
da_2 = DataArray(x_masked_2)
self.assertArrayEqual(x_masked, x_masked_2)
self.assertDataArrayEqual(da, da_2)
da_masked_array = da.to_masked_array(copy=True)
self.assertTrue(isinstance(da_masked_array, np.ma.MaskedArray))
# Test masks
self.assertArrayEqual(da_masked_array.mask, x_masked.mask)
# Test that mask is unpacked correctly
self.assertArrayEqual(da.values, x_masked.filled(np.nan))
# Test that the underlying data (including nans) hasn't changed
self.assertArrayEqual(da_masked_array, x_masked.filled(np.nan))
# Test that copy=False gives access to values
masked_array = da.to_masked_array(copy=False)
masked_array[0, 0] = 10.
self.assertEqual(masked_array[0, 0], 10.)
self.assertEqual(da[0, 0].values, 10.)
self.assertTrue(masked_array.base is da.values)
self.assertIsInstance(masked_array, np.ma.MaskedArray)
# Test with some odd arrays
for v in [4, np.nan, True, '4', 'four']:
da = DataArray(v)
ma = da.to_masked_array()
self.assertIsInstance(ma, np.ma.MaskedArray)
def test_to_and_from_cdms2(self):
try:
import cdms2
except ImportError:
raise unittest.SkipTest('cdms2 not installed')
original = DataArray(np.arange(6).reshape(2, 3),
[('distance', [-2, 2], {'units': 'meters'}),
('time', pd.date_range('2000-01-01', periods=3))],
name='foo', attrs={'baz': 123})
expected_coords = [Coordinate('distance', [-2, 2]),
Coordinate('time', [0, 1, 2])]
actual = original.to_cdms2()
self.assertArrayEqual(actual, original)
self.assertEqual(actual.id, original.name)
self.assertItemsEqual(actual.getAxisIds(), original.dims)
for axis, coord in zip(actual.getAxisList(), expected_coords):
self.assertEqual(axis.id, coord.name)
self.assertArrayEqual(axis, coord.values)
self.assertEqual(actual.baz, original.attrs['baz'])
component_times = actual.getAxis(1).asComponentTime()
self.assertEqual(len(component_times), 3)
self.assertEqual(str(component_times[0]), '2000-1-1 0:0:0.0')
roundtripped = DataArray.from_cdms2(actual)
self.assertDataArrayIdentical(original, roundtripped)
def test_to_dataset_whole(self):
unnamed = DataArray([1, 2], dims='x')
actual = unnamed.to_dataset()
expected = Dataset({None: ('x', [1, 2])})
self.assertDatasetIdentical(expected, actual)
self.assertIsNot(unnamed._dataset, actual)
actual = unnamed.to_dataset(name='foo')
expected = Dataset({'foo': ('x', [1, 2])})
self.assertDatasetIdentical(expected, actual)
named = DataArray([1, 2], dims='x', name='foo')
actual = named.to_dataset()
expected = Dataset({'foo': ('x', [1, 2])})
self.assertDatasetIdentical(expected, actual)
expected = Dataset({'bar': ('x', [1, 2])})
with self.assertWarns('order of the arguments'):
actual = named.to_dataset('bar')
self.assertDatasetIdentical(expected, actual)
def test_to_dataset_split(self):
array = DataArray([1, 2, 3], coords=[('x', list('abc'))],
attrs={'a': 1})
expected = Dataset(OrderedDict([('a', 1), ('b', 2), ('c', 3)]),
attrs={'a': 1})
actual = array.to_dataset('x')
self.assertDatasetIdentical(expected, actual)
with self.assertRaises(TypeError):
array.to_dataset('x', name='foo')
roundtripped = actual.to_array(dim='x')
self.assertDataArrayIdentical(array, roundtripped)
array = DataArray([1, 2, 3], dims='x')
expected = Dataset(OrderedDict([('0', 1), ('1', 2), ('2', 3)]))
actual = array.to_dataset('x')
self.assertDatasetIdentical(expected, actual)
def test__title_for_slice(self):
array = DataArray(np.ones((4, 3, 2)), dims=['a', 'b', 'c'])
self.assertEqual('', array._title_for_slice())
self.assertEqual('c = 0', array.isel(c=0)._title_for_slice())
self.assertEqual('b = 1, c = 0', array.isel(b=1, c=0)._title_for_slice())
a2 = DataArray(np.ones((4, 1)), dims=['a', 'b'])
self.assertEqual('b = [0]', a2._title_for_slice())
def test__title_for_slice_truncate(self):
array = DataArray(np.ones((4)))
array.coords['a'] = 'a' * 100
array.coords['b'] = 'b' * 100
nchar = 80
title = array._title_for_slice(truncate=nchar)
self.assertEqual(nchar, len(title))
self.assertTrue(title.endswith('...'))
| apache-2.0 |
ZacNeubert/traveling-serpent | plot.py | 1 | 2298 | import json
import matplotlib.pyplot as plot
import argparse
from statistics import mean
parser = argparse.ArgumentParser()
parser.add_argument('--infiles', action='store', nargs='+')
parser.add_argument('--fields', action='store', nargs='+')
parser.add_argument('--x-field', action='store')
parser.add_argument('--start-i', action='store', type=int, default=0)
parser.add_argument('--end-i', action='store', type=int, default=9999999)
args = parser.parse_args()
KNOWN_NAMES = {
'timestamp': 'Timestamp (seconds)',
'mean_move': 'Mean Moves over RUNS Runs',
'mean_reward': 'Mean Reward over RUNS Runs',
'stdev_move': 'Standard Deviation of Move Count over RUNS Runs',
'stdev_reward': 'Standard Deviation of Reward over RUNS Runs',
}
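# Illustrative invocation (file names and titles are made up; the fields are the keys
# listed in KNOWN_NAMES above, whose 'RUNS' placeholder is later replaced by the mean
# run count in translate_label):
#   python plot.py --infiles moves:moves.json reward:reward.json \
#       --fields mean_move mean_reward --x-field timestamp
# Each --infiles entry is a "title:path" pair; every line of the file that contains
# 'mean' is parsed as a JSON object (see read_file below).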
def read_file(infile):
with open(infile, 'r') as inf:
file_json = sorted([json.loads(line) for line in inf.readlines() if 'mean' in line], key=lambda line: line[args.x_field])
return file_json
def translate_label(label, mean_runs):
if KNOWN_NAMES.get(label):
label = KNOWN_NAMES.get(label)
else:
label = input('What is {}? '.format(label))
label = label.replace('RUNS', str(int(mean_runs)))
return label
if __name__ == '__main__':
file_json_dicts = {file.split(':')[0]: read_file(file.split(':')[1]) for file in args.infiles}
for field in args.fields:
for file_title, file_content in file_json_dicts.items():
times = [d[args.x_field] for d in file_content][args.start_i:args.end_i]
values = [d[field] for d in file_content][args.start_i:args.end_i]
mean_runs = mean([d['length'] for d in file_content][args.start_i:args.end_i])
mean_vals = mean(values)
print(mean_vals)
if not times:
raise Exception('No x axis')
if not values:
raise Exception('No y axis')
fig = plot.figure()
x_label = translate_label(args.x_field, mean_runs)
y_label = translate_label(field, mean_runs)
plot.xlabel(x_label)
plot.ylabel(y_label)
fig.suptitle(file_title, fontsize=20)
ax = fig.add_subplot(111)
ax.grid(True)
ax.plot(times, values)
plot.show()
| gpl-3.0 |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/sklearn/utils/tests/test_multiclass.py | 14 | 15416 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from functools import partial
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_label_indicator_matrix
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import is_sequence_of_sequences
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multilabel-sequences': [
[[0, 1]],
[[0], [1]],
[[1, 2, 3]],
[[1, 2, 1]], # duplicate values, why not?
[[1], [2], [0, 1]],
[[1], [2]],
[[]],
[()],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object')),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
# not currently supported sequence of sequences
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabels
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[(0, 1, 2), (0,), tuple(), (2, 1)]),
np.arange(3))
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[[0, 1, 2], [0], list(), [2, 1]]),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
# Some tests with string inputs
assert_array_equal(unique_labels(["a", "b", "c"], ["d"]),
["a", "b", "c", "d"])
assert_array_equal(assert_warns(DeprecationWarning, unique_labels,
[["a", "b"], ["c"]], [["d"]]),
["a", "b", "c", "d"])
@ignore_warnings
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-sequences",
"multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
@ignore_warnings
def test_unique_labels_mixed_types():
# Mix of multilabel-indicator and multilabel-sequences
mix_multilabel_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multilabel-sequences"])
for y_multilabel, y_multiclass in mix_multilabel_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"] +
EXAMPLES["multilabel-sequences"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix string and number input type
assert_raises(ValueError, unique_labels, [[1, 2], [3]],
[["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [3]])
assert_array_equal(unique_labels([(2,), (0, 2,)], [(), ()]), [0, 2])
assert_array_equal(unique_labels([("2",), ("0", "2",)], [(), ()]),
["0", "2"])
@ignore_warnings
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group.startswith('multilabel'):
assert_, exp = assert_true, 'True'
else:
assert_, exp = assert_false, 'False'
for example in group_examples:
assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s' % (example, exp))
def test_is_label_indicator_matrix():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_label_indicator_matrix(exmpl_sparse),
msg=('is_label_indicator_matrix(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_label_indicator_matrix(example),
msg='is_label_indicator_matrix(%r) should be %s'
% (example, dense_exp))
def test_is_sequence_of_sequences():
for group, group_examples in iteritems(EXAMPLES):
if group == 'multilabel-sequences':
assert_, exp = assert_true, 'True'
check = partial(assert_warns, DeprecationWarning,
is_sequence_of_sequences)
else:
assert_, exp = assert_false, 'False'
check = is_sequence_of_sequences
for example in group_examples:
assert_(check(example),
msg='is_sequence_of_sequences(%r) should be %s'
% (example, exp))
@ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg='type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example)))
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
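# densified, y_sp is identical to y above: column 1 stores its row-0 zero explicitly
# and leaves its row-4 zero implicit, while column 2 is all implicit zeros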
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
if __name__ == "__main__":
import nose
nose.runmodule()
| mit |
oldmonkABA/optimal_histogram_bin_width | hist_optimal_bin_width.py | 1 | 3159 | #######################################################################################################################
#Author : Dr. Arun B Ayyar
#
#Based on : Shimazaki H. and Shinomoto S., A method for selecting the bin size of a time histogram Neural Computation (2007)
# Vol. 19(6), 1503-1527
#
#Data : The duration for eruptions of the Old Faithful geyser in Yellowstone National Park (in minutes)
# or a normal distribution.
# given at http://176.32.89.45/~hideaki/res/histogram.html
#
#Comments : Implements a faster version than using hist from matplotlib and histogram from numpy libraries
# Also implements the shifts for the bin edges
#
########################################################################################################################
import numpy as np
from numpy.random import normal
from scipy import linspace
import array
from matplotlib import rcParams
from matplotlib.pyplot import figure, plot, xlabel, ylabel,\
title, show, savefig, hist
data = normal(0, 1, 100000) #Data placeholder. Use this to input your own data
#data = [4.37,3.87,4.00,4.03,3.50,4.08,2.25,4.70,1.73,4.93,1.73,4.62,\
#3.43,4.25,1.68,3.92,3.68,3.10,4.03,1.77,4.08,1.75,3.20,1.85,\
#4.62,1.97,4.50,3.92,4.35,2.33,3.83,1.88,4.60,1.80,4.73,1.77,\
#4.57,1.85,3.52,4.00,3.70,3.72,4.25,3.58,3.80,3.77,3.75,2.50,\
#4.50,4.10,3.70,3.80,3.43,4.00,2.27,4.40,4.05,4.25,3.33,2.00,\
#4.33,2.93,4.58,1.90,3.58,3.73,3.73,1.82,4.63,3.50,4.00,3.67,\
#1.67,4.60,1.67,4.00,1.80,4.42,1.90,4.63,2.93,3.50,1.97,4.28,\
#1.83,4.13,1.83,4.65,4.20,3.93,4.33,1.83,4.53,2.03,4.18,4.43,\
#4.07,4.13,3.95,4.10,2.27,4.58,1.90,4.50,1.95,4.83,4.12]
data_max = max(data) #upper end of data
data_min = min(data) #lower end of data
n_min = 2 #Minimum number of bins Ideal value = 2
n_max = 200 #Maximum number of bins Ideal value =200
n_shift = 30 #number of shifts Ideal value = 30
N = np.array(range(n_min,n_max))
D = float(data_max-data_min)/N #Bin width vector
Cs = np.zeros((len(D),n_shift)) #Cost function vector
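#Reference only (not called anywhere): a minimal, slower sketch of the same cost for a
#single candidate bin count with no edge shifts, using np.histogram instead of the
#digitize/bincount computation below; the function name is illustrative.
def _reference_cost(sample, n_bins):
    counts, edges = np.histogram(sample, bins=n_bins) #event count in each bin
    width = edges[1] - edges[0]                       #bin width
    k = counts.mean()                                 #mean event count
    v = counts.var()                                  #biased variance of event count
    return (2*k - v)/width**2                         #Shimazaki-Shinomoto cost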
#Computation of the cost function
for i in xrange(np.size(N)):
shift = linspace(0,D[i],n_shift)
for j in xrange(n_shift):
edges = linspace(data_min+shift[j]-D[i]/2,data_max+shift[j]-D[i]/2,N[i]+1) # shift the Bin edges
binindex = np.digitize(data,edges) #Find binindex of each data point
ki=np.bincount(binindex)[1:N[i]+1] #Find number of points in each bin
k = np.mean(ki) #Mean of event count
v = sum((ki-k)**2)/N[i] #Variance of event count
Cs[i,j]+= (2*k-v)/((D[i])**2) #The cost Function
C=Cs.mean(1)
#Optimal Bin Size Selection
loc = np.argwhere(Cs==Cs.min())[0]
cmin = C.min()
idx = np.where(C==cmin)
idx = idx[0][0]
optD = D[idx]
print 'Optimal Bin Number :',N[idx]
print 'Optimal Bin Width :',optD
#Plot
edges = linspace(data_min+shift[loc[1]]-D[idx]/2,data_max+shift[loc[1]]-D[idx]/2,N[idx]+1)
rcParams.update({'figure.autolayout': True})
fig = figure()
ax = fig.add_subplot(111)
ax.hist(data,edges)
title(u"Histogram")
ylabel(u"Frequency")
xlabel(u"Value")
savefig('Hist.png')
fig = figure()
plot(N,C,'.b',N[idx],cmin,'*r')
xlabel('Number of bins')
ylabel('Cobj')
savefig('Fobj.png')
| mit |
detrout/debian-statsmodels | statsmodels/tools/data.py | 2 | 3608 | """
Compatibility tools for various data structure inputs
"""
#TODO: question: interpret_data
# looks good and could/should be merged with other check/conversion functions we also have
# similar also to what Nathaniel mentioned for Formula
# good: if the ndarray check passes then loading pandas is not triggered
from statsmodels.compat.python import range
import numpy as np
import pandas as pd
def _check_period_index(x, freq="M"):
from pandas import PeriodIndex, DatetimeIndex
if not isinstance(x.index, (DatetimeIndex, PeriodIndex)):
raise ValueError("The index must be a DatetimeIndex or PeriodIndex")
from statsmodels.tsa.base.datetools import _infer_freq
inferred_freq = _infer_freq(x.index)
if not inferred_freq.startswith(freq):
raise ValueError("Expected frequency {}. Got {}".format(inferred_freq,
freq))
def is_data_frame(obj):
return isinstance(obj, pd.DataFrame)
def is_design_matrix(obj):
from patsy import DesignMatrix
return isinstance(obj, DesignMatrix)
def _is_structured_ndarray(obj):
return isinstance(obj, np.ndarray) and obj.dtype.names is not None
def interpret_data(data, colnames=None, rownames=None):
"""
Convert passed data structure to form required by estimation classes
Parameters
----------
data : ndarray-like
colnames : sequence or None
May be part of data structure
rownames : sequence or None
Returns
-------
(values, colnames, rownames) : (homogeneous ndarray, list, list or None)
"""
if isinstance(data, np.ndarray):
if _is_structured_ndarray(data):
if colnames is None:
colnames = data.dtype.names
values = struct_to_ndarray(data)
else:
values = data
if colnames is None:
colnames = ['Y_%d' % i for i in range(values.shape[1])]
elif is_data_frame(data):
# XXX: hack
data = data.dropna()
values = data.values
colnames = data.columns
rownames = data.index
else: # pragma: no cover
raise Exception('cannot handle other input types at the moment')
if not isinstance(colnames, list):
colnames = list(colnames)
# sanity check
if len(colnames) != values.shape[1]:
raise ValueError('length of colnames does not match number '
'of columns in data')
if rownames is not None and len(rownames) != len(values):
raise ValueError('length of rownames does not match number '
'of rows in data')
return values, colnames, rownames
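# Illustrative behaviour (values made up): for a DataFrame with columns ['a', 'b'],
# interpret_data drops NaN rows and returns (frame.values, ['a', 'b'], frame.index);
# for a plain (n, 2) ndarray it generates colnames ['Y_0', 'Y_1'] and rownames stays
# None unless one is passed in.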
def struct_to_ndarray(arr):
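# reinterpret a structured array with homogeneous float fields as a plain 2-D float ndarray (a view, no copy)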
return arr.view((float, len(arr.dtype.names)))
def _is_using_ndarray_type(endog, exog):
return (type(endog) is np.ndarray and
(type(exog) is np.ndarray or exog is None))
def _is_using_ndarray(endog, exog):
return (isinstance(endog, np.ndarray) and
(isinstance(exog, np.ndarray) or exog is None))
def _is_using_pandas(endog, exog):
klasses = (pd.Series, pd.DataFrame, pd.WidePanel)
return (isinstance(endog, klasses) or isinstance(exog, klasses))
def _is_array_like(endog, exog):
try: # do it like this in case of mixed types, i.e., ndarray and list
endog = np.asarray(endog)
exog = np.asarray(exog)
return True
except:
return False
def _is_using_patsy(endog, exog):
# we get this when a structured array is passed through a formula
return (is_design_matrix(endog) and
(is_design_matrix(exog) or exog is None))
| bsd-3-clause |
OSSHealth/ghdata | workers/worker_base.py | 1 | 130034 | #SPDX-License-Identifier: MIT
""" Helper methods constant across all workers """
import requests
import datetime
import time
import traceback
import json
import os
import sys
import math
import logging
import numpy
import copy
import concurrent
import multiprocessing
import psycopg2
import csv
import io
from logging import FileHandler, Formatter, StreamHandler
from multiprocessing import Process, Queue, Pool
from os import getpid
import sqlalchemy as s
import pandas as pd
from pathlib import Path
from urllib.parse import urlparse, quote
from sqlalchemy.ext.automap import automap_base
from augur.config import AugurConfig
from augur.logging import AugurLogging
from sqlalchemy.sql.expression import bindparam
from concurrent import futures
import dask.dataframe as dd
class Worker():
ROOT_AUGUR_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
## Set Thread Safety for OSX
# os.system("./osx-thread.sh")
def __init__(self, worker_type, config={}, given=[], models=[], data_tables=[], operations_tables=[], platform="github"):
self.worker_type = worker_type
self.collection_start_time = None
self._task = None # task currently being worked on (dict)
self._child = None # process of currently running task (multiprocessing process)
self._queue = Queue() # tasks stored here 1 at a time (in a mp queue so it can translate across multiple processes)
self.data_tables = data_tables
self.operations_tables = operations_tables
self._root_augur_dir = Worker.ROOT_AUGUR_DIR
self.platform = platform
# count of tuples inserted in the database (to store stats for each task in op tables)
self.update_counter = 0
self.insert_counter = 0
self._results_counter = 0
# if we are finishing a previous task, certain operations work differently
self.finishing_task = False
# Update config with options that are general and not specific to any worker
self.augur_config = AugurConfig(self._root_augur_dir)
self.config = {
'worker_type': self.worker_type,
'host': self.augur_config.get_value('Server', 'host'),
'gh_api_key': self.augur_config.get_value('Database', 'key'),
'gitlab_api_key': self.augur_config.get_value('Database', 'gitlab_api_key'),
'offline_mode': False
}
self.config.update(self.augur_config.get_section("Logging"))
try:
worker_defaults = self.augur_config.get_default_config()['Workers'][self.config['worker_type']]
self.config.update(worker_defaults)
except KeyError as e:
logging.warning('Could not get default configuration for {}'.format(self.config['worker_type']))
worker_info = self.augur_config.get_value('Workers', self.config['worker_type'])
self.config.update(worker_info)
worker_port = self.config['port']
while True:
try:
r = requests.get('http://{}:{}/AUGWOP/heartbeat'.format(
self.config['host'], worker_port)).json()
if 'status' in r:
if r['status'] == 'alive':
worker_port += 1
except:
break
self.config.update({
'port': worker_port,
'id': "workers.{}.{}".format(self.worker_type, worker_port),
'capture_output': False,
'location': 'http://{}:{}'.format(self.config['host'], worker_port),
'port_broker': self.augur_config.get_value('Server', 'port'),
'host_broker': self.augur_config.get_value('Server', 'host'),
'host_database': self.augur_config.get_value('Database', 'host'),
'port_database': self.augur_config.get_value('Database', 'port'),
'user_database': self.augur_config.get_value('Database', 'user'),
'name_database': self.augur_config.get_value('Database', 'name'),
'password_database': self.augur_config.get_value('Database', 'password')
})
self.config.update(config)
# Initialize logging in the main process
self.initialize_logging()
# Clear log contents from previous runs
open(self.config["server_logfile"], "w").close()
open(self.config["collection_logfile"], "w").close()
# Get configured collection logger
self.logger = logging.getLogger(self.config["id"])
self.logger.info('Worker (PID: {}) initializing...'.format(str(os.getpid())))
self.task_info = None
self.repo_id = None
self.owner = None
self.repo = None
self.given = given
self.models = models
self.debug_data = [] if 'debug_data' not in self.config else self.config['debug_data']
self.specs = {
'id': self.config['id'], # what the broker knows this worker as
'location': self.config['location'], # host + port worker is running on (so broker can send tasks here)
'qualifications': [
{
'given': self.given, # type of repo this worker can be given as a task
'models': self.models # models this worker can fill for a repo as a task
}
],
'config': self.config
}
# Send broker hello message
if self.config['offline_mode'] is False:
self.connect_to_broker()
try:
self.tool_source
self.tool_version
self.data_source
except:
self.tool_source = 'Augur Worker Testing'
self.tool_version = '0.0.0'
self.data_source = 'Augur Worker Testing'
def __repr__(self):
return f"{self.config['id']}"
def write_debug_data(self, data, name):
if name in self.debug_data:
with open(f'{name}.json', 'w') as f:
json.dump(data, f)
def initialize_logging(self):
self.config['log_level'] = self.config['log_level'].upper()
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
if self.config['verbose']:
format_string = AugurLogging.verbose_format_string
else:
format_string = AugurLogging.simple_format_string
formatter = Formatter(fmt=format_string)
error_formatter = Formatter(fmt=AugurLogging.error_format_string)
worker_dir = AugurLogging.get_log_directories(self.augur_config, reset_logfiles=False) + "/workers/"
Path(worker_dir).mkdir(exist_ok=True)
logfile_dir = worker_dir + f"/{self.worker_type}/"
Path(logfile_dir).mkdir(exist_ok=True)
server_logfile = logfile_dir + '{}_{}_server.log'.format(self.worker_type, self.config["port"])
collection_logfile = logfile_dir + '{}_{}_collection.log'.format(self.worker_type, self.config["port"])
collection_errorfile = logfile_dir + '{}_{}_collection.err'.format(self.worker_type, self.config["port"])
self.config.update({
'logfile_dir': logfile_dir,
'server_logfile': server_logfile,
'collection_logfile': collection_logfile,
'collection_errorfile': collection_errorfile
})
collection_file_handler = FileHandler(filename=self.config['collection_logfile'], mode="a")
collection_file_handler.setFormatter(formatter)
collection_file_handler.setLevel(self.config['log_level'])
collection_errorfile_handler = FileHandler(filename=self.config['collection_errorfile'], mode="a")
collection_errorfile_handler.setFormatter(error_formatter)
collection_errorfile_handler.setLevel(logging.WARNING)
logger = logging.getLogger(self.config['id'])
logger.handlers = []
logger.addHandler(collection_file_handler)
logger.addHandler(collection_errorfile_handler)
logger.setLevel(self.config['log_level'])
logger.propagate = False
if self.config['debug']:
self.config['log_level'] = 'DEBUG'
console_handler = StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(self.config['log_level'])
logger.addHandler(console_handler)
if self.config['quiet']:
logger.disabled = True
self.logger = logger
def initialize_database_connections(self):
DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
self.config['user_database'], self.config['password_database'], self.config['host_database'], self.config['port_database'], self.config['name_database']
)
# Create an sqlalchemy engine for both database schemas
self.logger.info("Making database connections")
db_schema = 'augur_data'
self.db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(db_schema)})
helper_schema = 'augur_operations'
self.helper_db = s.create_engine(DB_STR, poolclass=s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(helper_schema)})
metadata = s.MetaData()
helper_metadata = s.MetaData()
# Reflect only the tables we will use for each schema's metadata object
metadata.reflect(self.db, only=self.data_tables)
helper_metadata.reflect(self.helper_db, only=self.operations_tables)
Base = automap_base(metadata=metadata)
HelperBase = automap_base(metadata=helper_metadata)
Base.prepare()
HelperBase.prepare()
# So we can access all our tables when inserting, updating, etc
for table in self.data_tables:
setattr(self, '{}_table'.format(table), Base.classes[table].__table__)
try:
self.logger.info(HelperBase.classes.keys())
except:
pass
for table in self.operations_tables:
try:
setattr(self, '{}_table'.format(table), HelperBase.classes[table].__table__)
except Exception as e:
self.logger.error("Error setting attribute for table: {} : {}".format(table, e))
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.history_id = self.get_max_id('worker_history', 'history_id', operations_table=True) + 1
# Organize different api keys/oauths available
self.logger.info("Initializing API key.")
if 'gh_api_key' in self.config or 'gitlab_api_key' in self.config:
self.init_oauths(self.platform)
else:
self.oauths = [{'oauth_id': 0}]
@property
def results_counter(self):
""" Property that is returned when the worker's current results_counter is referenced
"""
if self.worker_type == 'facade_worker':
return self.cfg.repos_processed #TODO: figure out why this doesn't work...
else:
return self._results_counter
@results_counter.setter
def results_counter(self, value):
""" entry point for the broker to add a task to the queue
Adds this task to the queue, and calls method to process queue
"""
self._results_counter = value
@property
def task(self):
""" Property that is returned when the worker's current task is referenced
"""
return self._task
@task.setter
def task(self, value):
""" entry point for the broker to add a task to the queue
Adds this task to the queue, and calls method to process queue
"""
# If the task has one of our "valid" job types
if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN":
self._queue.put(value)
# Setting that causes paginating through ALL pages, not just unknown ones
# This setting is set by the housekeeper and is attached to the task before it gets sent here
if 'focused_task' in value:
if value['focused_task'] == 1:
self.logger.debug("Focused task is ON\n")
self.finishing_task = True
self._task = value
self.run()
def cancel(self):
""" Delete/cancel current task
"""
self._task = None
def run(self):
""" Kicks off the processing of the queue if it is not already being processed
Gets run whenever a new task is added
"""
# Spawn a subprocess to handle message reading and performing the tasks
self._child = Process(target=self.collect, args=())
self._child.start()
def collect(self):
""" Function to process each entry in the worker's task queue
Determines what action to take based off the message type
"""
self.initialize_logging() # need to initialize logging again in child process cause multiprocessing
self.logger.info("Starting data collection process\n")
self.initialize_database_connections()
while True:
if not self._queue.empty():
message = self._queue.get() # Get the task off our MP queue
else:
self.logger.info("No job found.")
break
self.logger.info("Popped off message: {}\n".format(str(message)))
if message['job_type'] == 'STOP':
break
# If task is not a valid job type
if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':
raise ValueError('{} is not a recognized task type'.format(message['job_type']))
# Query repo_id corresponding to repo url of given task
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(message['given'][self.given[0][0]]))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
self.logger.info("repo_id for which data collection is being initiated: {}".format(str(repo_id)))
# Call method corresponding to model sent in task
try:
model_method = getattr(self, '{}_model'.format(message['models'][0]))
                self.record_model_process(repo_id, message['models'][0])
except Exception as e:
self.logger.error('Error: {}.\nNo defined method for model: {}, '.format(e, message['models'][0]) +
'must have name of {}_model'.format(message['models'][0]))
self.register_task_failure(message, repo_id, e)
break
# Model method calls wrapped in try/except so that any unexpected error that occurs can be caught
# and worker can move onto the next task without stopping
try:
self.logger.info("Calling model method {}_model".format(message['models'][0]))
self.task_info = message
self.repo_id = repo_id
self.owner, self.repo = self.get_owner_repo(list(message['given'].values())[0])
model_method(message, repo_id)
except Exception as e: # this could be a custom exception, might make things easier
self.register_task_failure(message, repo_id, e)
break
self.logger.debug('Closing database connections\n')
self.db.dispose()
self.helper_db.dispose()
self.logger.info("Collection process finished")
def sync_df_types(self, subject, source, subject_columns, source_columns):
type_dict = {}
for index in range(len(source_columns)):
if type(source[source_columns[index]].values[0]) == numpy.datetime64:
subject[subject_columns[index]] = pd.to_datetime(
subject[subject_columns[index]], utc=True
)
source[source_columns[index]] = pd.to_datetime(
source[source_columns[index]], utc=True
)
continue
type_dict[subject_columns[index]] = type(source[source_columns[index]].values[0])
subject = subject.astype(type_dict)
return subject, source
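    # Hedged usage sketch for sync_df_types (the frames and column names below are
    # made up for illustration, not taken from a real collection run):
    #
    #   subject = pd.DataFrame({'issue_id': ['1', '2']})   # string column
    #   source = pd.DataFrame({'id': [1, 2]})               # integer column
    #   subject, source = self.sync_df_types(subject, source, ['issue_id'], ['id'])
    #   # subject['issue_id'] is cast to the dtype of source['id'] so a later merge
    #   # on these columns does not fail on mismatched types.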
def get_sqlalchemy_type(self, data, column_name=None):
if type(data) == str:
try:
time.strptime(data, "%Y-%m-%dT%H:%M:%SZ")
return s.types.TIMESTAMP
except ValueError:
return s.types.String
elif (
isinstance(data, (int, numpy.integer))
or (isinstance(data, float) and column_name and 'id' in column_name)
):
return s.types.BigInteger
elif isinstance(data, float):
return s.types.Float
elif type(data) in [numpy.datetime64, pd._libs.tslibs.timestamps.Timestamp]:
return s.types.TIMESTAMP
elif column_name and 'id' in column_name:
return s.types.BigInteger
return s.types.String
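    # Hedged sketch of get_sqlalchemy_type on a few sample values (illustrative,
    # not an exhaustive mapping; 'issue_id' is a hypothetical column name):
    #
    #   self.get_sqlalchemy_type("2021-01-01T00:00:00Z")          # s.types.TIMESTAMP
    #   self.get_sqlalchemy_type("some label")                     # s.types.String
    #   self.get_sqlalchemy_type(42)                               # s.types.BigInteger
    #   self.get_sqlalchemy_type(3.14)                             # s.types.Float
    #   self.get_sqlalchemy_type(3.0, column_name='issue_id')      # s.types.BigInteger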
def _convert_float_nan_to_int(self, df):
for column in df.columns:
if (
df[column].dtype == float
and ((df[column] % 1 == 0) | (df[column].isnull())).all()
):
df[column] = df[column].astype("Int64").astype(object).where(
pd.notnull(df[column]), None
)
return df
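    # Hedged sketch of _convert_float_nan_to_int (made-up frame for illustration):
    #
    #   df = pd.DataFrame({'gh_user_id': [1.0, float('nan'), 3.0]})
    #   df = self._convert_float_nan_to_int(df)
    #   # The whole-number float column becomes objects [1, None, 3], which keeps
    #   # Postgres from rejecting "1.0"-style values for integer columns on insert.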
def _setup_postgres_merge(self, data_sets, sort=False):
metadata = s.MetaData()
data_tables = []
# Setup/create tables
for index, data in enumerate(data_sets):
data_table = s.schema.Table(f"merge_data_{index}_{os.getpid()}", metadata)
df = pd.DataFrame(data)
columns = sorted(list(df.columns)) if sort else df.columns
df = self._convert_float_nan_to_int(df)
for column in columns:
data_table.append_column(
s.schema.Column(
column, self.get_sqlalchemy_type(
df.fillna(method='bfill').iloc[0][column], column_name=column
)
)
)
data_tables.append(data_table)
metadata.create_all(self.db, checkfirst=True)
# Insert data to tables
for data_table, data in zip(data_tables, data_sets):
self.bulk_insert(
data_table, insert=data, increment_counter=False, convert_float_int=True
)
session = s.orm.Session(self.db)
self.logger.info("Session created for merge tables")
return data_tables, metadata, session
def _close_postgres_merge(self, metadata, session):
session.close()
self.logger.info("Session closed")
# metadata.reflect(self.db, only=[new_data_table.name, table_values_table.name])
metadata.drop_all(self.db, checkfirst=True)
self.logger.info("Merge tables dropped")
def _get_data_set_columns(self, data, columns):
if not len(data):
return []
self.logger.info("Getting data set columns")
df = pd.DataFrame(data, columns=data[0].keys())
final_columns = copy.deepcopy(columns)
for column in columns:
if '.' not in column:
continue
root = column.split('.')[0]
if root not in df.columns:
df[root] = None
expanded_column = pd.DataFrame(
df[root].where(df[root].notna(), lambda x: [{}]).tolist()
)
expanded_column.columns = [
f'{root}.{attribute}' for attribute in expanded_column.columns
]
if column not in expanded_column.columns:
expanded_column[column] = None
final_columns += list(expanded_column.columns)
try:
df = df.join(expanded_column)
except ValueError:
# columns already added (happens if trying to expand the same column twice)
                # TODO: Catch this before by only looping unique prefixes?
self.logger.info("Columns have already been added, moving on...")
pass
self.logger.info(final_columns)
self.logger.info(list(set(final_columns)))
self.logger.info("Finished getting data set columns")
return df[list(set(final_columns))].to_dict(orient='records')
def organize_needed_data(
self, new_data, table_values, table_pkey, action_map={}, in_memory=True
):
if len(table_values) == 0:
return new_data, []
if len(new_data) == 0:
return [], []
need_insertion = pd.DataFrame()
need_updates = pd.DataFrame()
if not in_memory:
new_data_columns = action_map['insert']['source']
table_value_columns = action_map['insert']['augur']
if 'update' in action_map:
new_data_columns += action_map['update']['source']
table_value_columns += action_map['update']['augur']
(new_data_table, table_values_table), metadata, session = self._setup_postgres_merge(
[
self._get_data_set_columns(new_data, new_data_columns),
self._get_data_set_columns(table_values, table_value_columns)
]
)
need_insertion = pd.DataFrame(session.query(new_data_table).join(table_values_table,
eval(
' and '.join([
f"table_values_table.c.{table_column} == new_data_table.c.{source_column}" \
for table_column, source_column in zip(action_map['insert']['augur'],
action_map['insert']['source'])
])
), isouter=True).filter(
table_values_table.c[action_map['insert']['augur'][0]] == None
).all(), columns=table_value_columns)
self.logger.info("need_insertion calculated successfully")
need_updates = pd.DataFrame(columns=table_value_columns)
if 'update' in action_map:
need_updates = pd.DataFrame(session.query(new_data_table).join(table_values_table,
s.and_(
eval(' and '.join([f"table_values_table.c.{table_column} == new_data_table.c.{source_column}" for \
table_column, source_column in zip(action_map['insert']['augur'], action_map['insert']['source'])])),
eval(' and '.join([f"table_values_table.c.{table_column} != new_data_table.c.{source_column}" for \
table_column, source_column in zip(action_map['update']['augur'], action_map['update']['source'])]))
) ).all(), columns=table_value_columns)
self.logger.info("need_updates calculated successfully")
self._close_postgres_merge(metadata, session)
new_data_df = pd.DataFrame(new_data)
need_insertion, new_data_df = self.sync_df_types(
need_insertion, new_data_df, table_value_columns, new_data_columns
)
need_insertion = need_insertion.merge(
new_data_df, how='inner', left_on=table_value_columns, right_on=new_data_columns
)
self.logger.info(
f"Table needs {len(need_insertion)} insertions and "
f"{len(need_updates)} updates.\n")
else:
table_values_df = pd.DataFrame(table_values, columns=table_values[0].keys())
new_data_df = pd.DataFrame(new_data).dropna(subset=action_map['insert']['source'])
new_data_df, table_values_df = self.sync_df_types(new_data_df, table_values_df,
action_map['insert']['source'], action_map['insert']['augur'])
need_insertion = new_data_df.merge(table_values_df, suffixes=('','_table'),
how='outer', indicator=True, left_on=action_map['insert']['source'],
right_on=action_map['insert']['augur']).loc[lambda x : x['_merge']=='left_only']
if 'update' in action_map:
new_data_df, table_values_df = self.sync_df_types(new_data_df, table_values_df,
action_map['update']['source'], action_map['update']['augur'])
partitions = math.ceil(len(new_data_df) / 1000)
attempts = 0
while attempts < 50:
try:
need_updates = pd.DataFrame()
self.logger.info(f"Trying {partitions} partitions\n")
for sub_df in numpy.array_split(new_data_df, partitions):
self.logger.info(f"Trying a partition, len {len(sub_df)}\n")
need_updates = pd.concat([ need_updates, sub_df.merge(table_values_df, left_on=action_map['insert']['source'],
right_on=action_map['insert']['augur'], suffixes=('','_table'), how='inner',
indicator=False).merge(table_values_df, left_on=action_map['update']['source'],
right_on=action_map['update']['augur'], suffixes=('','_table'), how='outer',
indicator=True).loc[lambda x : x['_merge']=='left_only'] ])
self.logger.info(f"need_updates merge: {len(sub_df)} worked\n")
break
except MemoryError as e:
self.logger.info(f"new_data ({sub_df.shape}) is too large to allocate memory for " +
f"need_updates df merge.\nMemoryError: {e}\nTrying again with {partitions + 1} partitions...\n")
partitions += 1
attempts += 1
# self.logger.info(f"End attempt # {attempts}\n")
if attempts >= 50:
                    self.logger.info("Max need_updates merge attempts exceeded, cannot perform " +
"updates on this repo.\n")
else:
need_updates = need_updates.drop([column for column in list(need_updates.columns) if \
column not in action_map['update']['augur'] and column not in action_map['insert']['augur']],
axis='columns')
for column in action_map['insert']['augur']:
need_updates[f'b_{column}'] = need_updates[column]
need_updates = need_updates.drop([column for column in action_map['insert']['augur']], axis='columns')
return need_insertion.to_dict('records'), need_updates.to_dict('records')
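    # Hedged sketch of the action_map shape organize_needed_data expects; the column
    # names are hypothetical examples, not a required schema:
    #
    #   action_map = {
    #       'insert': {'source': ['id'], 'augur': ['gh_issue_id']},
    #       'update': {'source': ['state'], 'augur': ['issue_state']}
    #   }
    #   need_insert, need_update = self.organize_needed_data(
    #       new_data, table_values, 'issue_id', action_map=action_map
    #   )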
def assign_tuple_action(self, new_data, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map={}):
""" DEPRECATED
Include an extra key-value pair on each element of new_data that represents
the action that should be taken with this element (i.e. 'need_insertion')
:param new_data: List of dictionaries, data to be assigned an action to
:param table_values: Pandas DataFrame, existing data in the database to check
what action should be taken on the new_data depending on the presence of
each element in this DataFrame
:param update_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
updates (if source data value != value in existing database row, then an
update is needed). Key is source data column name, value is database field name.
Example: {'id': 'gh_issue_id'}
:param duplicate_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
duplicates (if source data value == value in existing database row, then this
element is a duplicate and would not need an insertion). Key is source data
column name, value is database field name. Example: {'id': 'gh_issue_id'}
:param table_pkey: String, the field name of the primary key of the table in
the database that we are checking the table_values for.
:param value_update_col_map: Dictionary, sometimes we add a new field to a table,
and we want to trigger an update of that row in the database even if all of the
data values are the same and would not need an update ordinarily. Checking for
a specific existing value in the database field allows us to do this. The key is the
name of the field in the database we are checking for a specific value to trigger
an update, the value is the value we are checking for equality to trigger an update.
Example: {'cntrb_id': None}
:return: List of dictionaries, contains all the same elements of new_data, except
each element now has an extra key-value pair with the key being 'flag', and
the value being 'need_insertion', 'need_update', or 'none'
"""
need_insertion_count = 0
need_update_count = 0
if type(table_values) == list:
if len(table_values) > 0:
table_values = pd.DataFrame(table_values, columns=table_values[0].keys())
else:
table_values = pd.DataFrame(table_values)
for i, obj in enumerate(new_data):
if type(obj) != dict:
new_data[i] = {'flag': 'none'}
continue
obj['flag'] = 'none' # default of no action needed
existing_tuple = None
for db_dupe_key in list(duplicate_col_map.keys()):
if table_values.isin([obj[duplicate_col_map[db_dupe_key]]]).any().any():
if table_values[table_values[db_dupe_key].isin(
[obj[duplicate_col_map[db_dupe_key]]])].to_dict('records'):
existing_tuple = table_values[table_values[db_dupe_key].isin(
[obj[duplicate_col_map[db_dupe_key]]])].to_dict('records')[0]
continue
obj['flag'] = 'need_insertion'
need_insertion_count += 1
break
if obj['flag'] == 'need_insertion':
continue
if not existing_tuple:
self.logger.info('An existing tuple was not found for this data ' +
'point and we have reached the check-updates portion of assigning ' +
'tuple action, so we will now move to next data point\n')
continue
# If we need to check the values of the existing tuple to determine if an update is needed
for augur_col, value_check in value_update_col_map.items():
not_nan_check = not (math.isnan(value_check) and math.isnan(existing_tuple[augur_col])) if value_check is not None else True
if existing_tuple[augur_col] != value_check and not_nan_check:
continue
self.logger.info("Found a tuple that needs an update for column: {}\n".format(augur_col))
obj['flag'] = 'need_update'
obj['pkey'] = existing_tuple[table_pkey]
need_update_count += 1
if obj['flag'] == 'need_update':
self.logger.info('Already determined that current tuple needs update, skipping checking further updates. '
'Moving to next tuple.\n')
continue
# Now check the existing tuple's values against the response values to determine if an update is needed
for col in update_col_map.keys():
if update_col_map[col] not in obj:
continue
if obj[update_col_map[col]] == existing_tuple[col]:
continue
self.logger.info("Found a tuple that needs an update for column: {}\n".format(col))
obj['flag'] = 'need_update'
self.logger.info(existing_tuple)
obj['pkey'] = existing_tuple[table_pkey]
need_update_count += 1
self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(len(new_data)) +
"was reduced to {} tuples, and {} tuple updates are needed.\n".format(need_insertion_count, need_update_count))
return new_data
def check_duplicates(self, new_data, table_values, key):
""" Filters what items of the new_data json (list of dictionaries) that are not
present in the table_values df
:param new_data: List of dictionaries, new data to filter duplicates out of
:param table_values: Pandas DataFrame, existing data to check what data is already
present in the database
:param key: String, key of each dict in new_data whose value we are checking
duplicates with
:return: List of dictionaries, contains elements of new_data that are not already
present in the database
"""
need_insertion = []
for obj in new_data:
if type(obj) != dict:
continue
if not table_values.isin([obj[key]]).any().any():
need_insertion.append(obj)
self.logger.info("Page recieved has {} tuples, while filtering duplicates this ".format(str(len(new_data))) +
"was reduced to {} tuples.\n".format(str(len(need_insertion))))
return need_insertion
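    # Hedged usage sketch for check_duplicates (hypothetical data):
    #
    #   table_values = pd.DataFrame({'gh_issue_id': [101, 102]})
    #   new_data = [{'id': 101, 'title': 'old'}, {'id': 103, 'title': 'new'}]
    #   self.check_duplicates(new_data, table_values, 'id')
    #   # -> [{'id': 103, 'title': 'new'}]  (101 is already present in the table)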
def connect_to_broker(self):
connected = False
for i in range(5):
try:
self.logger.debug("Connecting to broker, attempt {}\n".format(i))
if i > 0:
time.sleep(10)
requests.post('http://{}:{}/api/unstable/workers'.format(
self.config['host_broker'],self.config['port_broker']), json=self.specs)
self.logger.info("Connection to the broker was successful\n")
connected = True
break
except requests.exceptions.ConnectionError:
self.logger.error('Cannot connect to the broker. Trying again...\n')
if not connected:
sys.exit('Could not connect to the broker after 5 attempts! Quitting...\n')
@staticmethod
def dump_queue(queue):
""" Empties all pending items in a queue and returns them in a list.
"""
result = []
queue.put("STOP")
for i in iter(queue.get, 'STOP'):
result.append(i)
# time.sleep(.1)
return result
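    # Hedged usage sketch for dump_queue (standalone illustration):
    #
    #   from multiprocessing import Queue
    #   q = Queue()
    #   q.put({'job_type': 'UPDATE'})
    #   q.put({'job_type': 'MAINTAIN'})
    #   self.dump_queue(q)  # -> [{'job_type': 'UPDATE'}, {'job_type': 'MAINTAIN'}]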
def find_id_from_login(self, login, platform='github'):
""" Retrieves our contributor table primary key value for the contributor with
the given GitHub login credentials, if this contributor is not there, then
they get inserted.
:param login: String, the GitHub login username to find the primary key id for
:return: Integer, the id of the row in our database with the matching GitHub login
"""
idSQL = s.sql.text("""
SELECT cntrb_id FROM contributors WHERE cntrb_login = '{}' \
AND LOWER(data_source) = '{} api'
""".format(login, platform))
rs = pd.read_sql(idSQL, self.db, params={})
data_list = [list(row) for row in rs.itertuples(index=False)]
try:
return data_list[0][0]
except:
self.logger.info('contributor needs to be added...')
if platform == 'github':
cntrb_url = ("https://api.github.com/users/" + login)
elif platform == 'gitlab':
cntrb_url = ("https://gitlab.com/api/v4/users?username=" + login )
self.logger.info("Hitting endpoint: {} ...\n".format(cntrb_url))
while True:
try:
r = requests.get(url=cntrb_url, headers=self.headers)
break
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(30)
self.update_rate_limit(r)
contributor = r.json()
company = None
location = None
email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
if platform == 'github':
cntrb = {
'cntrb_login': contributor['login'] if 'login' in contributor else None,
'cntrb_email': contributor['email'] if 'email' in contributor else None,
'cntrb_company': contributor['company'] if 'company' in contributor else None,
'cntrb_location': contributor['location'] if 'location' in contributor else None,
'cntrb_created_at': contributor['created_at'] if 'created_at' in contributor else None,
'cntrb_canonical': None,
'gh_user_id': contributor['id'] if 'id' in contributor else None,
'gh_login': contributor['login'] if 'login' in contributor else None,
'gh_url': contributor['url'] if 'url' in contributor else None,
'gh_html_url': contributor['html_url'] if 'html_url' in contributor else None,
'gh_node_id': contributor['node_id'] if 'node_id' in contributor else None,
'gh_avatar_url': contributor['avatar_url'] if 'avatar_url' in contributor else None,
'gh_gravatar_id': contributor['gravatar_id'] if 'gravatar_id' in contributor else None,
'gh_followers_url': contributor['followers_url'] if 'followers_url' in contributor else None,
'gh_following_url': contributor['following_url'] if 'following_url' in contributor else None,
'gh_gists_url': contributor['gists_url'] if 'gists_url' in contributor else None,
'gh_starred_url': contributor['starred_url'] if 'starred_url' in contributor else None,
'gh_subscriptions_url': contributor['subscriptions_url'] if 'subscriptions_url' in contributor else None,
'gh_organizations_url': contributor['organizations_url'] if 'organizations_url' in contributor else None,
'gh_repos_url': contributor['repos_url'] if 'repos_url' in contributor else None,
'gh_events_url': contributor['events_url'] if 'events_url' in contributor else None,
'gh_received_events_url': contributor['received_events_url'] if 'received_events_url' in contributor else None,
'gh_type': contributor['type'] if 'type' in contributor else None,
'gh_site_admin': contributor['site_admin'] if 'site_admin' in contributor else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
elif platform == 'gitlab':
cntrb = {
'cntrb_login': contributor[0]['username'] if 'username' in contributor[0] else None,
'cntrb_email': email,
'cntrb_company': company,
'cntrb_location': location,
'cntrb_created_at': contributor[0]['created_at'] if 'created_at' in contributor[0] else None,
'cntrb_canonical': None,
'gh_user_id': contributor[0]['id'],
'gh_login': contributor[0]['username'],
'gh_url': contributor[0]['web_url'],
'gh_html_url': None,
'gh_node_id': None,
'gh_avatar_url': contributor[0]['avatar_url'],
'gh_gravatar_id': None,
'gh_followers_url': None,
'gh_following_url': None,
'gh_gists_url': None,
'gh_starred_url': None,
'gh_subscriptions_url': None,
'gh_organizations_url': None,
'gh_repos_url': None,
'gh_events_url': None,
'gh_received_events_url': None,
'gh_type': None,
'gh_site_admin': None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: " + str(result.inserted_primary_key))
self.results_counter += 1
self.cntrb_id_inc = int(result.inserted_primary_key[0])
self.logger.info(f"Inserted contributor: {cntrb['cntrb_login']}\n")
return self.find_id_from_login(login, platform)
def get_owner_repo(self, git_url):
""" Gets the owner and repository names of a repository from a git url
:param git_url: String, the git url of a repository
:return: Tuple, includes the owner and repository names in that order
"""
split = git_url.split('/')
owner = split[-2]
repo = split[-1]
if '.git' == repo[-4:]:
repo = repo[:-4]
return owner, repo
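    # Hedged usage sketch for get_owner_repo:
    #
    #   self.get_owner_repo('https://github.com/chaoss/augur.git')
    #   # -> ('chaoss', 'augur')  (a trailing '.git' is stripped when present)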
def get_max_id(self, table, column, default=25150, operations_table=False):
""" Gets the max value (usually used for id/pk's) of any Integer column
of any table
:param table: String, the table that consists of the column you want to
query a max value for
:param column: String, the column that you want to query the max value for
:param default: Integer, if there are no values in the
specified column, the value of this parameter will be returned
:param operations_table: Boolean, if True, this signifies that the table/column
that is wanted to be queried is in the augur_operations schema rather than
the augur_data schema. Default False
        :return: Integer, one greater than the max value of the specified column/table,
            or the default if the column contains no values
"""
maxIdSQL = s.sql.text("""
SELECT max({0}.{1}) AS {1}
FROM {0}
""".format(table, column))
db = self.db if not operations_table else self.helper_db
rs = pd.read_sql(maxIdSQL, db, params={})
if rs.iloc[0][column] is not None:
max_id = int(rs.iloc[0][column]) + 1
self.logger.info("Found max id for {} column in the {} table: {}\n".format(column, table, max_id))
else:
max_id = default
self.logger.warning("Could not find max id for {} column in the {} table... " +
"using default set to: {}\n".format(column, table, max_id))
return max_id
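    # Hedged usage sketch for get_max_id (mirrors the worker_history call in __init__):
    #
    #   self.get_max_id('worker_history', 'history_id', operations_table=True)
    #   # -> 1 + the largest history_id currently in augur_operations.worker_history,
    #   #    or 25150 (the default) if the column has no values.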
def get_table_values(self, cols, tables, where_clause=""):
""" Can query all values of any column(s) from any table(s)
with an optional where clause
:param cols: List of Strings, column(s) that user wants to query
:param tables: List of Strings, table(s) that user wants to query
:param where_clause: String, optional where clause to filter the values
queried
:return: Pandas DataFrame, contains all values queried in the columns, tables, and
optional where clause provided
"""
table_str = tables[0]
del tables[0]
col_str = cols[0]
del cols[0]
for table in tables:
table_str += ", " + table
for col in cols:
col_str += ", " + col
table_values_sql = s.sql.text("""
SELECT {} FROM {} {}
""".format(col_str, table_str, where_clause))
self.logger.info("Getting table values with the following PSQL query: \n{}\n".format(
table_values_sql))
values = pd.read_sql(table_values_sql, self.db, params={})
return values
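    # Hedged usage sketch for get_table_values (table/column names are examples):
    #
    #   values = self.get_table_values(
    #       ['gh_issue_id', 'issue_state'], ['issues'],
    #       where_clause="WHERE repo_id = {}".format(self.repo_id)
    #   )
    #   # -> DataFrame with one row per matching issue and the requested columns.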
def init_oauths(self, platform='github'):
self.oauths = []
self.headers = None
self.logger.info("Trying initialization.")
# Make a list of api key in the config combined w keys stored in the database
# Select endpoint to hit solely to retrieve rate limit
# information from headers of the response
# Adjust header keys needed to fetch rate limit information from the API responses
if platform == 'github':
url = "https://api.github.com/users/gabe-heim"
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'github'
""".format(self.config['gh_api_key']))
key_name = 'gh_api_key'
rate_limit_header_key = "X-RateLimit-Remaining"
rate_limit_reset_header_key = "X-RateLimit-Reset"
elif platform == 'gitlab':
url = "https://gitlab.com/api/v4/version"
oauthSQL = s.sql.text("""
SELECT * FROM worker_oauth WHERE access_token <> '{}' and platform = 'gitlab'
""".format(self.config['gitlab_api_key']))
key_name = 'gitlab_api_key'
rate_limit_header_key = 'ratelimit-remaining'
rate_limit_reset_header_key = 'ratelimit-reset'
for oauth in [{'oauth_id': 0, 'access_token': self.config[key_name]}] + json.loads(
pd.read_sql(oauthSQL, self.helper_db, params={}).to_json(orient="records")
):
if platform == 'github':
self.headers = {'Authorization': 'token %s' % oauth['access_token']}
elif platform == 'gitlab':
self.headers = {'Authorization': 'Bearer %s' % oauth['access_token']}
response = requests.get(url=url, headers=self.headers)
self.oauths.append({
'oauth_id': oauth['oauth_id'],
'access_token': oauth['access_token'],
'rate_limit': int(response.headers[rate_limit_header_key]),
'seconds_to_reset': (
datetime.datetime.fromtimestamp(
int(response.headers[rate_limit_reset_header_key])
) - datetime.datetime.now()
).total_seconds()
})
self.logger.debug("Found OAuth available for use: {}".format(self.oauths[-1]))
if len(self.oauths) == 0:
self.logger.info(
"No API keys detected, please include one in your config or in the "
"worker_oauths table in the augur_operations schema of your database."
)
# First key to be used will be the one specified in the config (first element in
# self.oauths array will always be the key in use)
if platform == 'github':
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
elif platform == 'gitlab':
self.headers = {'Authorization': 'Bearer %s' % self.oauths[0]['access_token']}
self.logger.info("OAuth initialized\n")
def bulk_insert(
self, table, insert=[], update=[], unique_columns=[], update_columns=[],
max_attempts=3, attempt_delay=3, increment_counter=True, convert_float_int=False
):
""" Performs bulk inserts/updates of the given data to the given table
        :param table: SQLAlchemy Table object for the table we are inserting/updating rows of
:param insert: List of dicts, data points to insert
:param update: List of dicts, data points to update, only needs key/value
pairs of the update_columns and the unique_columns
:param unique_columns: List of strings, column names that would uniquely identify any
given data point
:param update_columns: List of strings, names of columns that are being updated
:param max_attempts: Integer, number of attempts to perform on inserting/updating
before moving on
:param attempt_delay: Integer, number of seconds to wait in between attempts
:returns: SQLAlchemy database execution response object(s), contains metadata
about number of rows inserted etc. This data is not often used.
"""
self.logger.info(
f"{len(insert)} insertions are needed and {len(update)} "
f"updates are needed for {table}"
)
update_result = None
insert_result = None
if len(update) > 0:
attempts = 0
update_start_time = time.time()
while attempts < max_attempts:
try:
update_result = self.db.execute(
table.update().where(
eval(
' and '.join(
[
f"self.{table}_table.c.{key} == bindparam('b_{key}')"
for key in unique_columns
]
)
)
).values(
{key: key for key in update_columns}
),
update
)
if increment_counter:
self.update_counter += update_result.rowcount
self.logger.info(
f"Updated {update_result.rowcount} rows in "
f"{time.time() - update_start_time} seconds"
)
break
except Exception as e:
self.logger.info(f"Warning! Error bulk updating data: {e}")
time.sleep(attempt_delay)
attempts += 1
if len(insert) > 0:
insert_start_time = time.time()
def psql_insert_copy(table, conn, keys, data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
table : pandas.io.sql.SQLTable
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : Iterable that iterates the values to be inserted
"""
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = io.StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ', '.join('"{}"'.format(k) for k in keys)
if table.schema:
table_name = '{}.{}'.format(table.schema, table.name)
else:
table_name = table.name
sql = 'COPY {} ({}) FROM STDIN WITH CSV'.format(
table_name, columns)
cur.copy_expert(sql=sql, file=s_buf)
df = pd.DataFrame(insert)
if convert_float_int:
df = self._convert_float_nan_to_int(df)
df.to_sql(
name=table.name,
con=self.db,
if_exists="append",
index=False,
method=psql_insert_copy
)
if increment_counter:
self.insert_counter += len(insert)
self.logger.info(
f"Inserted {len(insert)} rows in {time.time() - insert_start_time} seconds "
"thanks to postgresql's COPY FROM CSV! :)"
)
return insert_result, update_result
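    # Hedged sketch of a bulk_insert call (hypothetical table and column names, with
    # `need_insert`/`need_update` as produced by organize_needed_data):
    #
    #   self.bulk_insert(
    #       self.issues_table,
    #       insert=need_insert,
    #       update=need_update,
    #       unique_columns=['gh_issue_id'],
    #       update_columns=['issue_state']
    #   )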
def text_clean(self, data, field):
""" "Cleans" the provided field of each dict in the list of dicts provided
by removing NUL (C text termination) characters
Example: "\u0000"
:param data: List of dicts
:param field: String
:returns: Same data list with each element's field updated with NUL characters
removed
"""
return [
{
**data_point,
field: data_point[field].replace("\x00", "\uFFFD")
} for data_point in data
]
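    # Hedged sketch of text_clean on made-up data:
    #
    #   data = [{'body': 'hello\x00world'}]
    #   self.text_clean(data, 'body')
    #   # -> [{'body': 'hello\uFFFDworld'}]  (NUL replaced, other keys untouched)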
def _add_nested_columns(self, df, column_names):
# todo: support deeper nests (>1) and only expand necessary columns
# todo: merge with _get_data_set_columns
for column in column_names:
if '.' not in column:
continue
root = column.split('.')[0]
if root not in df.columns:
df[root] = None
expanded_column = pd.DataFrame(
df[root].where(df[root].notna(), lambda x: [{}]).tolist()
)
expanded_column.columns = [
f'{root}.{attribute}' for attribute in expanded_column.columns
]
if column not in expanded_column.columns:
expanded_column[column] = None
try:
df = df.join(expanded_column)
except ValueError:
# columns already added (happens if trying to expand the same column twice)
                # TODO: Catch this before by only looping unique prefixes?
pass
return df
def enrich_cntrb_id(
self, data, key, action_map_additions={'insert': {'source': [], 'augur': []}},
platform='github', prefix=''
):
if not len(data):
return data
self.logger.info(f"Enriching contributor ids for {len(data)} data points...")
source_df = pd.DataFrame(data)
expanded_source_df = self._add_nested_columns(
source_df.copy(), [key] + action_map_additions['insert']['source']
)
# Insert cntrbs that are not in db
cntrb_action_map = {
'insert': {
'source': [key] + action_map_additions['insert']['source'],
'augur': ['cntrb_login'] + action_map_additions['insert']['augur']
}
}
source_cntrb_insert, _ = self.new_organize_needed_data(
expanded_source_df.to_dict(orient='records'), augur_table=self.contributors_table,
action_map=cntrb_action_map
)
cntrb_insert = [
{
'cntrb_login': contributor[f'{prefix}login'],
'cntrb_created_at': None if (
f'{prefix}created_at' not in contributor
) else contributor[f'{prefix}created_at'],
'cntrb_email': None if f'{prefix}email' not in contributor else contributor[f'{prefix}email'],
'cntrb_company': None if f'{prefix}company' not in contributor else contributor[f'{prefix}company'],
'cntrb_location': None if (
f'{prefix}location' not in contributor
) else contributor[f'{prefix}location'],
'gh_user_id': None if (
not contributor[f'{prefix}id']
) else int(float(contributor[f'{prefix}id'])),
'gh_login': contributor[f'{prefix}login'],
'gh_url': contributor[f'{prefix}url'],
'gh_html_url': contributor[f'{prefix}html_url'],
'gh_node_id': contributor[f'{prefix}node_id'],
'gh_avatar_url': contributor[f'{prefix}avatar_url'],
'gh_gravatar_id': contributor[f'{prefix}gravatar_id'],
'gh_followers_url': contributor[f'{prefix}followers_url'],
'gh_following_url': contributor[f'{prefix}following_url'],
'gh_gists_url': contributor[f'{prefix}gists_url'],
'gh_starred_url': contributor[f'{prefix}starred_url'],
'gh_subscriptions_url': contributor[f'{prefix}subscriptions_url'],
'gh_organizations_url': contributor[f'{prefix}organizations_url'],
'gh_repos_url': contributor[f'{prefix}repos_url'],
'gh_events_url': contributor[f'{prefix}events_url'],
'gh_received_events_url': contributor[f'{prefix}received_events_url'],
'gh_type': contributor[f'{prefix}type'],
'gh_site_admin': contributor[f'{prefix}site_admin'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
} for contributor in source_cntrb_insert if contributor[f'{prefix}login']
]
self.bulk_insert(self.contributors_table, cntrb_insert)
# Query db for inserted cntrb pkeys and add to shallow level of data
# Query
cntrb_pk_name = list(self.contributors_table.primary_key)[0].name
session = s.orm.Session(self.db)
inserted_pks = pd.DataFrame(
session.query(
self.contributors_table.c[cntrb_pk_name], self.contributors_table.c.cntrb_login,
self.contributors_table.c.gh_node_id
).distinct(self.contributors_table.c.cntrb_login).order_by(
self.contributors_table.c.cntrb_login, self.contributors_table.c[cntrb_pk_name]
).all(), columns=[cntrb_pk_name, 'cntrb_login', 'gh_node_id']
).to_dict(orient='records')
session.close()
# Prepare for merge
source_columns = sorted(list(source_df.columns))
necessary_columns = sorted(list(set(source_columns + cntrb_action_map['insert']['source'])))
(source_table, inserted_pks_table), metadata, session = self._setup_postgres_merge(
[
expanded_source_df[necessary_columns].to_dict(orient='records'),
inserted_pks
], sort=True
)
final_columns = [cntrb_pk_name] + sorted(list(set(necessary_columns)))
# Merge
source_pk = pd.DataFrame(
session.query(
inserted_pks_table.c.cntrb_id, source_table
).join(
source_table,
eval(
' and '.join(
[
(
f"inserted_pks_table.c['{table_column}'] "
f"== source_table.c['{source_column}']"
) for table_column, source_column in zip(
cntrb_action_map['insert']['augur'],
cntrb_action_map['insert']['source']
)
]
)
)
).all(), columns=final_columns
)
# Cleanup merge
source_pk = self._eval_json_columns(source_pk)
self._close_postgres_merge(metadata, session)
self.logger.info(
"Contributor id enrichment successful, result has "
f"{len(source_pk)} data points.\n"
)
return source_pk.to_dict(orient='records')
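    # Hedged sketch of an enrich_cntrb_id call as a model method might make it; the
    # key and prefix are hypothetical, matching payloads where the author object is
    # nested under 'user':
    #
    #   issues = self.enrich_cntrb_id(
    #       issues, 'user.login', action_map_additions={
    #           'insert': {'source': ['user.node_id'], 'augur': ['gh_node_id']}
    #       }, prefix='user.'
    #   )
    #   # Each returned dict now carries a 'cntrb_id' foreign key alongside the
    #   # original (flattened) source fields.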
def enrich_data_primary_keys(
self, source_data, table, gh_merge_fields, augur_merge_fields, in_memory=False
):
self.logger.info("Preparing to enrich data.\n")
if len(source_data) == 0:
self.logger.info("There is no source data to enrich.\n")
return source_data
source_df = self._add_nested_columns(pd.DataFrame(source_data), gh_merge_fields)
if not in_memory:
source_pk_columns = list(source_df.columns)
source_pk_columns.insert(0, list(table.primary_key)[0].name)
(source_table, ), metadata, session = self._setup_postgres_merge(
# [self._get_data_set_columns(source_data, gh_merge_fields)]
[source_df.to_dict(orient='records')]
)
source_pk = pd.DataFrame(
# eval(
# "session.query("
# + ", ".join(
# [
# f"table.c['{column}']" for column in [list(table.primary_key)[0].name]
# + augur_merge_fields
# ]
# )
# + ")"
# )
session.query(
table.c[list(table.primary_key)[0].name],
source_table
# eval(
# f"table.c['{list(table.primary_key)[0].name}'], "
# + ", ".join(
# [
# f"source_table.c['{column}']" for column in source_pk_columns
# ]
# )
# )
).join(
source_table,
eval(
' and '.join(
[
f"table.c['{table_column}'] == source_table.c['{source_column}']"
for table_column, source_column in zip(
augur_merge_fields, gh_merge_fields
)
]
)
)
).all(), columns=source_pk_columns # gh_merge_fields
)
source_pk = self._eval_json_columns(source_pk)
# source_pk, source_df = self.sync_df_types(
# source_pk, source_df, gh_merge_fields, gh_merge_fields
# )
# source_pk = source_pk.merge(source_df, how='inner', on=gh_merge_fields)
self.logger.info("source_pk calculated successfully")
self._close_postgres_merge(metadata, session)
self.logger.info("Done")
else:
# s_tuple = s.tuple_([table.c[field] for field in augur_merge_fields])
# s_tuple.__dict__['clauses'] = s_tuple.__dict__['clauses'][0].effective_value
# s_tuple.__dict__['_type_tuple'] = []
# for field in augur_merge_fields:
# s_tuple.__dict__['_type_tuple'].append(table.c[field].__dict__['type'])
# try:
# primary_keys = self.db.execute(s.sql.select(
# [table.c[field] for field in augur_merge_fields] + [table.c[list(table.primary_key)[0].name]]
# ).where(
# s_tuple.in_(
# list(source_df[gh_merge_fields].itertuples(index=False))
# ))).fetchall()
# except psycopg2.errors.StatementTooComplex as e:
self.logger.info("Retrieve pk statement too complex, querying all instead " +
"and performing partitioned merge.\n")
all_primary_keys = self.db.execute(s.sql.select(
[table.c[field] for field in augur_merge_fields] + [table.c[list(table.primary_key)[0].name]]
)).fetchall()
self.logger.info("Queried all")
all_primary_keys_df = pd.DataFrame(all_primary_keys,
columns=augur_merge_fields + [list(table.primary_key)[0].name])
self.logger.info("Converted to df")
source_df, all_primary_keys_df = self.sync_df_types(source_df, all_primary_keys_df,
gh_merge_fields, augur_merge_fields)
self.logger.info("Synced df types")
partitions = math.ceil(len(source_df) / 600)#1000)
attempts = 0
while attempts < 50:
try:
source_pk = pd.DataFrame()
self.logger.info(f"Trying {partitions} partitions of new data, {len(all_primary_keys_df)} " +
"pk data points to enrich\n")
for sub_df in numpy.array_split(source_df, partitions):
self.logger.info(f"Trying a partition, len {len(sub_df)}\n")
source_pk = pd.concat([ source_pk, sub_df.merge(all_primary_keys_df, suffixes=('','_table'),
how='inner', left_on=gh_merge_fields, right_on=augur_merge_fields) ])
self.logger.info(f"source_pk merge: {len(sub_df)} worked\n")
break
except MemoryError as e:
self.logger.info(f"new_data ({sub_df.shape}) is too large to allocate memory for " +
f"source_pk df merge.\nMemoryError: {e}\nTrying again with {partitions + 1} partitions...\n")
partitions += 1
attempts += 1
# self.logger.info(f"End attempt # {attempts}\n")
if attempts >= 50:
self.logger.info("Max source_pk merge attempts exceeded, cannot perform " +
"updates on this repo.\n")
else:
self.logger.info(f"Data enrichment successful, length: {len(source_pk)}\n")
# all_primary_keys_df.to_json(path_or_buf='all_primary_keys_df.json', orient='records')
# all_primary_keys_dask_df = dd.from_pandas(all_primary_keys_df, chunksize=1000)
# source_dask_df = dd.from_pandas(source_df, chunksize=1000)
# result = json.loads(source_dask_df.merge(all_primary_keys_dask_df, suffixes=('','_table'),
# how='inner', left_on=gh_merge_fields, right_on=augur_merge_fields).compute(
# ).to_json(default_handler=str, orient='records'))
return source_pk.to_dict(orient='records')
# if len(primary_keys) > 0:
# primary_keys_df = pd.DataFrame(primary_keys,
# columns=augur_merge_fields + [list(table.primary_key)[0].name])
# else:
# self.logger.info("There are no inserted primary keys to enrich the source data with.\n")
# return []
# source_df, primary_keys_df = self.sync_df_types(source_df, primary_keys_df,
# gh_merge_fields, augur_merge_fields)
# source_df = dd.from_pandas(source_df, chunksize=1000)
# primary_keys_df = dd.from_pandas(primary_keys_df, chunksize=1000)
# result = json.loads(source_df.merge(primary_keys_df, suffixes=('','_table'),
# how='inner', left_on=gh_merge_fields, right_on=augur_merge_fields).compute().to_json(
# default_handler=str, orient='records'))
# self.logger.info("Data enrichment successful.\n")
# return result
def multi_thread_urls(self, all_urls, max_attempts=5, platform='github'):
"""
:param all_urls: list of tuples
"""
if not len(all_urls):
self.logger.info("No urls to multithread, returning blank list.\n")
return []
def load_url(url, extra_data={}):
try:
html = requests.get(url, stream=True, headers=self.headers)
return html, extra_data
except requests.exceptions.RequestException as e:
self.logger.info(e, url)
self.logger.info("Beginning to multithread API endpoints.")
start = time.time()
all_data = []
valid_url_count = len(all_urls)
partitions = math.ceil(len(all_urls) / 600)
self.logger.info(f"{len(all_urls)} urls to process. Trying {partitions} partitions. " +
f"Using {max(multiprocessing.cpu_count()//8, 1)} threads.")
for urls in numpy.array_split(all_urls, partitions):
attempts = 0
self.logger.info(f"Total data points collected so far: {len(all_data)}")
while len(urls) > 0 and attempts < max_attempts:
with concurrent.futures.ThreadPoolExecutor(
max_workers=max(multiprocessing.cpu_count()//8, 1)
) as executor:
# Start the load operations and mark each future with its URL
future_to_url = {executor.submit(load_url, *url): url for url in urls}
self.logger.info("Multithreaded urls and returned status codes:")
count = 0
for future in concurrent.futures.as_completed(future_to_url):
if count % 100 == 0:
self.logger.info(
f"Processed {len(all_data)} / {valid_url_count} urls. "
f"{len(urls)} remaining in this partition."
)
count += 1
url = future_to_url[future]
try:
response, extra_data = future.result()
if response.status_code != 200:
self.logger.info(
f"Url: {url[0]} ; Status code: {response.status_code}"
)
if response.status_code == 403 or response.status_code == 401: # 403 is rate limit, 404 is not found, 401 is bad credentials
self.update_rate_limit(response, platform=platform)
continue
elif response.status_code == 200:
try:
page_data = response.json()
except:
page_data = json.loads(json.dumps(response.text))
page_data = [{**data, **extra_data} for data in page_data]
all_data += page_data
if 'last' in response.links and "&page=" not in url[0]:
urls += [
(url[0] + f"&page={page}", extra_data) for page in range(
2, int(response.links['last']['url'].split('=')[-1]) + 1
)
]
urls = numpy.delete(urls, numpy.where(urls == url), axis=0)
elif response.status_code == 404:
urls = numpy.delete(urls, numpy.where(urls == url), axis=0)
self.logger.info(f"Not found url: {url}\n")
else:
self.logger.info(
f"Unhandled response code: {response.status_code} {url}\n"
)
except Exception as e:
self.logger.info(
f"{url} generated an exception: {traceback.format_exc()}\n"
)
attempts += 1
self.logger.info(
f"Processed {valid_url_count} urls and got {len(all_data)} data points "
f"in {time.time() - start} seconds thanks to multithreading!\n"
)
return all_data
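    # Hedged sketch of the tuple format multi_thread_urls expects: each element is
    # (url, extra_data), and extra_data is merged into every record returned for
    # that url (the urls below are illustrative):
    #
    #   comment_urls = [
    #       ("https://api.github.com/repos/chaoss/augur/issues/1/comments?per_page=100",
    #        {'issue_id': 1}),
    #       ("https://api.github.com/repos/chaoss/augur/issues/2/comments?per_page=100",
    #        {'issue_id': 2})
    #   ]
    #   comments = self.multi_thread_urls(comment_urls)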
def _eval_json_columns(self, df):
if not len(df):
return df
for column in df.columns:
first_valid_value = df.fillna(method='bfill').iloc[0][column]
if isinstance(first_valid_value, str):
if (
first_valid_value[0] == '{' and first_valid_value[-1] == '}'
or first_valid_value[0] == '[' and first_valid_value[-1] == ']'
):
df[column] = df[column].fillna("'null_placeholder'").apply(eval).replace(
"null_placeholder", numpy.nan
).where(df[column].notna(), lambda x: [{}])
return df
def new_organize_needed_data(
self, new_data, augur_table=None, where_clause=True, action_map={}
):
self.logger.info(f"Beginning to organize needed data from {len(new_data)} data points...")
if len(new_data) == 0:
return [], []
new_data_columns = pd.DataFrame(new_data).columns
# # new_data_columns = copy.deepcopy(action_map['insert']['source'])
# table_value_columns = copy.deepcopy(action_map['insert']['augur'])
#
# if 'update' in action_map:
# # new_data_columns += action_map['update']['source']
# table_value_columns += action_map['update']['augur']
(new_data_table, ), metadata, session = self._setup_postgres_merge(
[
new_data
# self._get_data_set_columns(new_data, new_data_columns)
]
)
need_insertion = pd.DataFrame(
session.query(new_data_table).join(
augur_table,
eval(
' and '.join(
[
f"augur_table.c['{table_column}'] == new_data_table.c['{source_column}']"
for table_column, source_column in zip(
action_map['insert']['augur'], action_map['insert']['source']
)
]
)
), isouter=True
).filter(
augur_table.c[action_map['insert']['augur'][0]] == None
).all(), columns=new_data_columns # table_value_columns
)
need_insertion = self._eval_json_columns(need_insertion)
# new_data_df = pd.DataFrame(new_data)
# need_insertion, new_data_df = self.sync_df_types(
# need_insertion, new_data_df, table_value_columns, new_data_columns
# )
# need_insertion = need_insertion.merge(
# new_data_df, how='inner', left_on=table_value_columns, right_on=new_data_columns
# )
self.logger.info("need_insertion calculated successfully")
need_updates = pd.DataFrame(columns=new_data_columns)
if 'update' in action_map:
need_updates = pd.DataFrame(
session.query(new_data_table).join(
augur_table,
s.and_(
eval(
' and '.join(
[
(
f"augur_table.c.{table_column} "
f"== new_data_table.c.{source_column}"
) for table_column, source_column in zip(
action_map['insert']['augur'],
action_map['insert']['source']
)
]
)
),
eval(
' and '.join(
[
(
f"augur_table.c.{table_column} "
f"!= new_data_table.c.{source_column}"
) for table_column, source_column in zip(
action_map['update']['augur'],
action_map['update']['source']
)
]
)
)
)
).all(), columns=new_data_columns
)
self.logger.info("need_updates calculated successfully")
self._close_postgres_merge(metadata, session)
self.logger.info(
f"Table needs {len(need_insertion)} insertions and "
f"{len(need_updates)} updates.\n"
)
return need_insertion.to_dict('records'), need_updates.to_dict('records')
def new_paginate_endpoint(
self, url, action_map={}, table=None, where_clause=True, platform='github'
):
page_number = 1
multiple_pages = False
need_insertion = []
need_update = []
all_data = []
forward_pagination = True
backwards_activation = False
last_page_number = -1
while True:
# Multiple attempts to hit endpoint
num_attempts = 0
success = False
while num_attempts < 10:
self.logger.info("hitting an endpiont")
# f"Hitting endpoint: ...\n"
# f"{url.format(page_number)} on page number. \n")
try:
response = requests.get(url=url.format(page_number), headers=self.headers)
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(10)
continue
self.update_rate_limit(response, platform=platform)
try:
page_data = response.json()
except:
page_data = json.loads(json.dumps(response.text))
if type(page_data) == list:
success = True
break
elif type(page_data) == dict:
self.logger.info("Request returned a dict: {}\n".format(page_data))
if page_data['message'] == "Not Found":
self.logger.warning(
"Github repo was not found or does not exist for endpoint: "
f"{url.format(page_number)}\n"
)
break
if "You have triggered an abuse detection mechanism." in page_data['message']:
num_attempts -= 1
self.update_rate_limit(response, temporarily_disable=True,platform=platform)
if page_data['message'] == "Bad credentials":
self.update_rate_limit(response, bad_credentials=True, platform=platform)
elif type(page_data) == str:
self.logger.info(f"Warning! page_data was string: {page_data}\n")
if "<!DOCTYPE html>" in page_data:
self.logger.info("HTML was returned, trying again...\n")
elif len(page_data) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
page_data = json.loads(page_data)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Success
# Determine if continued pagination is needed
if len(page_data) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
all_data += page_data
if not forward_pagination:
# Checking contents of requests with what we already have in the db
page_insertions, page_updates = self.new_organize_needed_data(
page_data, augur_table=table, action_map=action_map
)
# Reached a page where we already have all tuples
if len(need_insertion) == 0 and len(need_update) == 0 and \
backwards_activation:
self.logger.info(
"No more pages with unknown tuples, breaking from pagination.\n"
)
break
need_insertion += page_insertions
need_update += page_updates
# Find last page so we can decrement from there
if 'last' in response.links and last_page_number == -1:
if platform == 'github':
last_page_number = int(response.links['last']['url'][-6:].split('=')[1])
elif platform == 'gitlab':
last_page_number = int(response.links['last']['url'].split('&')[2].split('=')[1])
if not forward_pagination and not backwards_activation:
page_number = last_page_number
backwards_activation = True
self.logger.info("Analyzation of page {} of {} complete\n".format(page_number,
int(last_page_number) if last_page_number != -1 else "*last page not known*"))
if (page_number <= 1 and not forward_pagination) or \
(page_number >= last_page_number and forward_pagination):
self.logger.info("No more pages to check, breaking from pagination.\n")
break
page_number = page_number + 1 if forward_pagination else page_number - 1
if forward_pagination:
need_insertion, need_update = self.new_organize_needed_data(
all_data, augur_table=table, action_map=action_map
)
return {
'insert': need_insertion,
'update': need_update,
'all': all_data
}
def paginate_endpoint(
self, url, action_map={}, table=None, where_clause=True, platform='github', in_memory=True
):
table_values = self.db.execute(
s.sql.select(self.get_relevant_columns(table, action_map)).where(where_clause)
).fetchall()
page_number = 1
multiple_pages = False
need_insertion = []
need_update = []
all_data = []
forward_pagination = True
backwards_activation = False
last_page_number = -1
while True:
# Multiple attempts to hit endpoint
num_attempts = 0
success = False
while num_attempts < 10:
self.logger.info(f"Hitting endpoint: {url.format(page_number)}...\n")
try:
response = requests.get(url=url.format(page_number), headers=self.headers)
except TimeoutError as e:
self.logger.info("Request timed out. Sleeping 10 seconds and trying again...\n")
time.sleep(10)
continue
self.update_rate_limit(response, platform=platform)
try:
page_data = response.json()
except:
page_data = json.loads(json.dumps(response.text))
if type(page_data) == list:
success = True
break
elif type(page_data) == dict:
self.logger.info("Request returned a dict: {}\n".format(page_data))
if page_data['message'] == "Not Found":
self.logger.warning(
"Github repo was not found or does not exist for endpoint: "
f"{url.format(page_number)}\n"
)
break
if "You have triggered an abuse detection mechanism." in page_data['message']:
num_attempts -= 1
self.update_rate_limit(response, temporarily_disable=True,platform=platform)
if page_data['message'] == "Bad credentials":
self.update_rate_limit(response, bad_credentials=True, platform=platform)
elif type(page_data) == str:
self.logger.info(f"Warning! page_data was string: {page_data}\n")
if "<!DOCTYPE html>" in page_data:
self.logger.info("HTML was returned, trying again...\n")
elif len(page_data) == 0:
self.logger.warning("Empty string, trying again...\n")
else:
try:
page_data = json.loads(page_data)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Success
# Determine if continued pagination is needed
if len(page_data) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
all_data += page_data
if not forward_pagination:
# Checking contents of requests with what we already have in the db
page_insertions, page_updates = self.organize_needed_data(
page_data, table_values, list(table.primary_key)[0].name,
action_map, in_memory=True
)
# Reached a page where we already have all tuples
if len(need_insertion) == 0 and len(need_update) == 0 and \
backwards_activation:
self.logger.info(
"No more pages with unknown tuples, breaking from pagination.\n"
)
break
need_insertion += page_insertions
need_update += page_updates
# Find last page so we can decrement from there
if 'last' in response.links and last_page_number == -1:
if platform == 'github':
last_page_number = int(response.links['last']['url'][-6:].split('=')[1])
elif platform == 'gitlab':
last_page_number = int(response.links['last']['url'].split('&')[2].split('=')[1])
if not forward_pagination and not backwards_activation:
page_number = last_page_number
backwards_activation = True
self.logger.info("Analyzation of page {} of {} complete\n".format(page_number,
int(last_page_number) if last_page_number != -1 else "*last page not known*"))
if (page_number <= 1 and not forward_pagination) or \
(page_number >= last_page_number and forward_pagination):
self.logger.info("No more pages to check, breaking from pagination.\n")
break
page_number = page_number + 1 if forward_pagination else page_number - 1
if forward_pagination:
need_insertion, need_update = self.organize_needed_data(
all_data, table_values, list(table.primary_key)[0].name, action_map,
in_memory=in_memory
)
return {
'insert': need_insertion,
'update': need_update,
'all': all_data
}
def paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey, where_clause="", value_update_col_map={}, platform="github"):
""" DEPRECATED
Paginate either backwards or forwards (depending on the value of the worker's
finishing_task attribute) through all the GitHub or GitLab api endpoint pages.
:param url: String, the url of the API endpoint we are paginating through, expects
a curly brace string formatter within the string to format the Integer
representing the page number that is wanted to be returned
:param duplicate_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
duplicates (if source data value == value in existing database row, then this
element is a duplicate and would not need an insertion). Key is source data
column name, value is database field name. Example: {'id': 'gh_issue_id'}
:param update_col_map: Dictionary, maps the column names of the source data
to the field names in our database for columns that should be checked for
updates (if source data value != value in existing database row, then an
update is needed). Key is source data column name, value is database field name.
Example: {'id': 'gh_issue_id'}
:param table: String, the name of the table that holds the values to check for
duplicates/updates against
:param table_pkey: String, the field name of the primary key of the table in
the database that we are getting the values for to cross-reference to check
for duplicates.
:param where_clause: String, optional where clause to filter the values
that are queried when preparing the values that will be cross-referenced
for duplicates/updates
:param value_update_col_map: Dictionary, sometimes we add a new field to a table,
and we want to trigger an update of that row in the database even if all of the
data values are the same and would not need an update ordinarily. Checking for
a specific existing value in the database field allows us to do this. The key is the
name of the field in the database we are checking for a specific value to trigger
an update, the value is the value we are checking for equality to trigger an update.
Example: {'cntrb_id': None}
:return: List of dictionaries, all data points from the pages of the specified API endpoint
each with a 'flag' key-value pair representing the required action to take with that
data point (i.e. 'need_insertion', 'need_update', 'none')
"""
update_keys = list(update_col_map.keys()) if update_col_map else []
update_keys += list(value_update_col_map.keys()) if value_update_col_map else []
cols_to_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
table_values = self.get_table_values(cols_to_query, [table], where_clause)
i = 1
multiple_pages = False
tuples = []
while True:
num_attempts = 0
success = False
while num_attempts < 3:
self.logger.info(f'Hitting endpoint: {url.format(i)}...\n')
r = requests.get(url=url.format(i), headers=self.headers)
self.update_rate_limit(r, platform=platform)
if 'last' not in r.links:
last_page = None
else:
if platform == "github":
last_page = r.links['last']['url'][-6:].split('=')[1]
elif platform == "gitlab":
last_page = r.links['last']['url'].split('&')[2].split("=")[1]
self.logger.info("Analyzing page {} of {}\n".format(i, int(last_page) + 1 if last_page is not None else '*last page not known*'))
try:
j = r.json()
except:
j = json.loads(json.dumps(r.text))
if type(j) != dict and type(j) != str:
success = True
break
elif type(j) == dict:
self.logger.info("Request returned a dict: {}\n".format(j))
if j['message'] == 'Not Found':
self.logger.warning("Github repo was not found or does not exist for endpoint: {}\n".format(url))
break
if j['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
num_attempts -= 1
self.logger.info("rate limit update code goes here")
self.update_rate_limit(r, temporarily_disable=True,platform=platform)
if j['message'] == 'Bad credentials':
self.logger.info("rate limit update code goes here")
self.update_rate_limit(r, bad_credentials=True, platform=platform)
elif type(j) == str:
self.logger.info(f'J was string: {j}\n')
if '<!DOCTYPE html>' in j:
self.logger.info('HTML was returned, trying again...\n')
elif len(j) == 0:
self.logger.warning('Empty string, trying again...\n')
else:
try:
j = json.loads(j)
success = True
break
except:
pass
num_attempts += 1
if not success:
break
# Find last page so we can decrement from there
if 'last' in r.links and not multiple_pages and not self.finishing_task:
if platform == "github":
param = r.links['last']['url'][-6:]
i = int(param.split('=')[1]) + 1
elif platform == "gitlab":
i = int(r.links['last']['url'].split('&')[2].split("=")[1]) + 1
self.logger.info("Multiple pages of request, last page is " + str(i - 1) + "\n")
multiple_pages = True
elif not multiple_pages and not self.finishing_task:
self.logger.info("Only 1 page of request\n")
elif self.finishing_task:
self.logger.info("Finishing a previous task, paginating forwards ..."
" excess rate limit requests will be made\n")
if len(j) == 0:
self.logger.info("Response was empty, breaking from pagination.\n")
break
# Checking contents of requests with what we already have in the db
j = self.assign_tuple_action(j, table_values, update_col_map, duplicate_col_map, table_pkey, value_update_col_map)
if not j:
self.logger.error("Assigning tuple action failed, moving to next page.\n")
i = i + 1 if self.finishing_task else i - 1
continue
try:
to_add = [obj for obj in j if obj not in tuples and (obj['flag'] != 'none')]
except Exception as e:
self.logger.error("Failure accessing data of page: {}. Moving to next page.\n".format(e))
i = i + 1 if self.finishing_task else i - 1
continue
if len(to_add) == 0 and multiple_pages and 'last' in r.links:
self.logger.info("{}".format(r.links['last']))
if platform == "github":
page_number = int(r.links['last']['url'][-6:].split('=')[1])
elif platform == "gitlab":
page_number = int(r.links['last']['url'].split('&')[2].split("=")[1])
if i - 1 != page_number:
self.logger.info("No more pages with unknown tuples, breaking from pagination.\n")
break
tuples += to_add
i = i + 1 if self.finishing_task else i - 1
            # Since we already would've checked the first page... break
if (i == 1 and multiple_pages and not self.finishing_task) or i < 1 or len(j) == 0:
self.logger.info("No more pages to check, breaking from pagination.\n")
break
return tuples
def query_github_contributors(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors
"""
self.logger.info(f"Querying contributors with given entry info: {entry_info}\n")
github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url']
# Extract owner/repo from the url for the endpoint
owner, name = self.get_owner_repo(github_url)
# Set the base of the url and place to hold contributors to insert
contributors_url = (
f"https://api.github.com/repos/{owner}/{name}/" +
"contributors?per_page=100&page={}"
)
# Get contributors that we already have stored
# Set our duplicate and update column map keys (something other than PK) to
# check dupicates/needed column updates with
table = 'contributors'
table_pkey = 'cntrb_id'
update_col_map = {'cntrb_email': 'email'}
duplicate_col_map = {'cntrb_login': 'login'}
#list to hold contributors needing insertion or update
contributors = self.paginate(contributors_url, duplicate_col_map, update_col_map, table, table_pkey)
self.logger.info("Count of contributors needing insertion: " + str(len(contributors)) + "\n")
for repo_contributor in contributors:
try:
                # Hit the single-contributor endpoint to get extra fields (e.g. `created_at`)
                # that the repo contributors list endpoint does not include
cntrb_url = ("https://api.github.com/users/" + repo_contributor['login'])
self.logger.info("Hitting endpoint: " + cntrb_url + " ...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
self.update_gh_rate_limit(r)
contributor = r.json()
company = None
location = None
                email = None
                canonical_email = None
if 'company' in contributor:
company = contributor['company']
if 'location' in contributor:
location = contributor['location']
if 'email' in contributor:
email = contributor['email']
canonical_email = contributor['email']
cntrb = {
"cntrb_login": contributor['login'],
"cntrb_created_at": contributor['created_at'],
"cntrb_email": email,
"cntrb_company": company,
"cntrb_location": location,
# "cntrb_type": , dont have a use for this as of now ... let it default to null
"cntrb_canonical": canonical_email,
"gh_user_id": contributor['id'],
"gh_login": contributor['login'],
"gh_url": contributor['url'],
"gh_html_url": contributor['html_url'],
"gh_node_id": contributor['node_id'],
"gh_avatar_url": contributor['avatar_url'],
"gh_gravatar_id": contributor['gravatar_id'],
"gh_followers_url": contributor['followers_url'],
"gh_following_url": contributor['following_url'],
"gh_gists_url": contributor['gists_url'],
"gh_starred_url": contributor['starred_url'],
"gh_subscriptions_url": contributor['subscriptions_url'],
"gh_organizations_url": contributor['organizations_url'],
"gh_repos_url": contributor['repos_url'],
"gh_events_url": contributor['events_url'],
"gh_received_events_url": contributor['received_events_url'],
"gh_type": contributor['type'],
"gh_site_admin": contributor['site_admin'],
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
# Commit insertion to table
if repo_contributor['flag'] == 'need_update':
                    result = self.db.execute(self.contributors_table.update().where(
                        self.contributors_table.c.cntrb_email==email).values(cntrb))
self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email))
self.cntrb_id_inc = repo_contributor['pkey']
elif repo_contributor['flag'] == 'need_insertion':
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key))
self.results_counter += 1
self.logger.info("Inserted contributor: " + contributor['login'] + "\n")
# Increment our global track of the cntrb id for the possibility of it being used as a FK
self.cntrb_id_inc = int(result.inserted_primary_key[0])
except Exception as e:
self.logger.error("Caught exception: {}".format(e))
self.logger.error("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url))
continue
def query_github_contributors_bulk(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors
"""
self.logger.info(f"Querying contributors with given entry info: {entry_info}\n")
github_url = entry_info['given']['github_url'] if 'github_url' in entry_info['given'] else entry_info['given']['git_url']
owner, name = self.get_owner_repo(github_url)
contributors_url = (f"https://api.github.com/repos/{owner}/{name}/" +
"contributors?per_page=100&page={}")
action_map = {
'insert': {
'source': ['login'],
'augur': ['cntrb_login']
},
'update': {
'source': ['email'],
'augur': ['cntrb_email']
}
}
source_contributors = self.paginate_endpoint(contributors_url, action_map=action_map,
table=self.contributors_table)
contributors_insert = []
for repo_contributor in source_contributors['insert']:
# Need to hit this single contributor endpoint to get extra data
cntrb_url = (f"https://api.github.com/users/{repo_contributor['login']}")
self.logger.info(f"Hitting endpoint: {cntrb_url} ...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
self.update_gh_rate_limit(r)
contributor = r.json()
contributors_insert.append({
'cntrb_login': contributor['login'],
'cntrb_created_at': contributor['created_at'],
'cntrb_email': contributor['email'] if 'email' in contributor else None,
'cntrb_company': contributor['company'] if 'company' in contributor else None,
'cntrb_location': contributor['location'] if 'location' in contributor else None,
'cntrb_canonical': contributor['email'] if 'email' in contributor else None,
'gh_user_id': contributor['id'],
'gh_login': contributor['login'],
'gh_url': contributor['url'],
'gh_html_url': contributor['html_url'],
'gh_node_id': contributor['node_id'],
'gh_avatar_url': contributor['avatar_url'],
'gh_gravatar_id': contributor['gravatar_id'],
'gh_followers_url': contributor['followers_url'],
'gh_following_url': contributor['following_url'],
'gh_gists_url': contributor['gists_url'],
'gh_starred_url': contributor['starred_url'],
'gh_subscriptions_url': contributor['subscriptions_url'],
'gh_organizations_url': contributor['organizations_url'],
'gh_repos_url': contributor['repos_url'],
'gh_events_url': contributor['events_url'],
'gh_received_events_url': contributor['received_events_url'],
'gh_type': contributor['type'],
'gh_site_admin': contributor['site_admin'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
})
contributors_insert_result, contributors_update_result = self.bulk_insert(self.contributors_table,
update=source_contributors['update'], unique_columns=action_map['insert']['augur'],
insert=contributors_insert, update_columns=action_map['update']['augur'])
def query_github_contributors_fast(self, entry_info, repo_id):
""" Data collection function
Query the GitHub API for contributors
"""
self.logger.info(f"Querying contributors with given entry info: {entry_info}")
github_url = (
entry_info['given']['github_url'] if 'github_url' in entry_info['given']
else entry_info['given']['git_url']
)
contributors_url = (
f"https://api.github.com/repos/{self.owner}/{self.name}/"
"contributors?per_page=100&page={}"
)
action_map = {
'insert': {
'source': ['login'],
'augur': ['cntrb_login']
},
'update': {
'source': ['email'],
'augur': ['cntrb_email']
}
}
source_contributors = self.paginate_endpoint(
contributors_url, action_map=action_map, table=self.contributors_table
)
contributors_insert = [
{
'cntrb_login': contributor['login'],
'cntrb_created_at': (
contributor['created_at'] if 'created_at' in contributor else None
),
'cntrb_email': contributor['email'] if 'email' in contributor else None,
'cntrb_company': contributor['company'] if 'company' in contributor else None,
'cntrb_location': contributor['location'] if 'location' in contributor else None,
'cntrb_canonical': contributor['email'] if 'email' in contributor else None,
'gh_user_id': contributor['id'],
'gh_login': contributor['login'],
'gh_url': contributor['url'],
'gh_html_url': contributor['html_url'],
'gh_node_id': contributor['node_id'],
'gh_avatar_url': contributor['avatar_url'],
'gh_gravatar_id': contributor['gravatar_id'],
'gh_followers_url': contributor['followers_url'],
'gh_following_url': contributor['following_url'],
'gh_gists_url': contributor['gists_url'],
'gh_starred_url': contributor['starred_url'],
'gh_subscriptions_url': contributor['subscriptions_url'],
'gh_organizations_url': contributor['organizations_url'],
'gh_repos_url': contributor['repos_url'],
'gh_events_url': contributor['events_url'],
'gh_received_events_url': contributor['received_events_url'],
'gh_type': contributor['type'],
'gh_site_admin': contributor['site_admin'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
} for contributor in source_contributors['insert']
]
self.bulk_insert(
self.contributors_table, update=source_contributors['update'],
unique_columns=action_map['insert']['augur'],
insert=contributors_insert, update_columns=action_map['update']['augur']
)
def query_gitlab_contribtutors(self, entry_info, repo_id):
gitlab_url = (
entry_info['given']['gitlab_url'] if 'gitlab_url' in entry_info['given']
else entry_info['given']['git_url']
)
self.logger.info("Querying contributors with given entry info: " + str(entry_info) + "\n")
path = urlparse(gitlab_url)
split = path[2].split('/')
owner = split[1]
name = split[2]
# Handles git url case by removing the extension
if ".git" in name:
name = name[:-4]
url_encoded_format = quote(owner + '/' + name, safe='')
table = 'contributors'
table_pkey = 'cntrb_id'
### %TODO Remap this to a GitLab Contributor ID like the GitHub Worker.
### Following Gabe's rework of the contributor worker.
update_col_map = {'cntrb_email': 'email'}
duplicate_col_map = {'cntrb_login': 'email'}
# list to hold contributors needing insertion or update
contributors = self.paginate("https://gitlab.com/api/v4/projects/" + url_encoded_format + "/repository/contributors?per_page=100&page={}", duplicate_col_map, update_col_map, table, table_pkey, platform='gitlab')
for repo_contributor in contributors:
try:
cntrb_compressed_url = ("https://gitlab.com/api/v4/users?search=" + repo_contributor['email'])
self.logger.info("Hitting endpoint: " + cntrb_compressed_url + " ...\n")
r = requests.get(url=cntrb_compressed_url, headers=self.headers)
contributor_compressed = r.json()
email = repo_contributor['email']
self.logger.info(contributor_compressed)
if len(contributor_compressed) == 0 or type(contributor_compressed) is dict or "id" not in contributor_compressed[0]:
continue
self.logger.info("Fetching for user: " + str(contributor_compressed[0]["id"]))
cntrb_url = ("https://gitlab.com/api/v4/users/" + str(contributor_compressed[0]["id"]))
self.logger.info("Hitting end point to get complete contributor info now: " + cntrb_url + "...\n")
r = requests.get(url=cntrb_url, headers=self.headers)
contributor = r.json()
cntrb = {
"cntrb_login": contributor.get('username', None),
"cntrb_created_at": contributor.get('created_at', None),
"cntrb_email": email,
"cntrb_company": contributor.get('organization', None),
"cntrb_location": contributor.get('location', None),
# "cntrb_type": , dont have a use for this as of now ... let it default to null
"cntrb_canonical": contributor.get('public_email', None),
"gh_user_id": contributor.get('id', None),
"gh_login": contributor.get('username', None),
"gh_url": contributor.get('web_url', None),
"gh_html_url": contributor.get('web_url', None),
"gh_node_id": None,
"gh_avatar_url": contributor.get('avatar_url', None),
"gh_gravatar_id": None,
"gh_followers_url": None,
"gh_following_url": None,
"gh_gists_url": None,
"gh_starred_url": None,
"gh_subscriptions_url": None,
"gh_organizations_url": None,
"gh_repos_url": None,
"gh_events_url": None,
"gh_received_events_url": None,
"gh_type": None,
"gh_site_admin": None,
"tool_source": self.tool_source,
"tool_version": self.tool_version,
"data_source": self.data_source
}
# Commit insertion to table
if repo_contributor['flag'] == 'need_update':
                    result = self.db.execute(self.contributors_table.update().where(
                        self.contributors_table.c.cntrb_email == email).values(cntrb))
self.logger.info("Updated tuple in the contributors table with existing email: {}".format(email))
self.cntrb_id_inc = repo_contributor['pkey']
elif repo_contributor['flag'] == 'need_insertion':
result = self.db.execute(self.contributors_table.insert().values(cntrb))
self.logger.info("Primary key inserted into the contributors table: {}".format(result.inserted_primary_key))
self.results_counter += 1
self.logger.info("Inserted contributor: " + contributor['username'] + "\n")
# Increment our global track of the cntrb id for the possibility of it being used as a FK
self.cntrb_id_inc = int(result.inserted_primary_key[0])
except Exception as e:
self.logger.info("Caught exception: {}".format(e))
self.logger.info("Cascading Contributor Anomalie from missing repo contributor data: {} ...\n".format(cntrb_url))
continue
def record_model_process(self, repo_id, model):
task_history = {
"repo_id": repo_id,
"worker": self.config['id'],
"job_model": model,
"oauth_id": self.oauths[0]['oauth_id'],
"timestamp": datetime.datetime.now(),
"status": "Stopped",
"total_results": self.results_counter
}
if self.finishing_task:
result = self.helper_db.execute(self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id).values(task_history))
self.history_id += 1
else:
result = self.helper_db.execute(self.worker_history_table.insert().values(task_history))
self.logger.info("Record incomplete history tuple: {}\n".format(result.inserted_primary_key))
self.history_id = int(result.inserted_primary_key[0])
self.collection_start_time = time.time()
def register_task_completion(self, task, repo_id, model):
self.logger.info(f"Worker completed this task in {self.collection_start_time - time.time()} seconds.\n")
# Task to send back to broker
task_completed = {
'worker_id': self.config['id'],
'job_type': "MAINTAIN",
'repo_id': repo_id,
'job_model': model
}
key = 'github_url' if 'github_url' in task['given'] else 'git_url' if 'git_url' in task['given'] else \
'gitlab_url' if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
task_completed[key] = task['given']['github_url'] if 'github_url' in task['given'] else task['given']['git_url'] \
if 'git_url' in task['given'] else task['given']['gitlab_url'] if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
if key == 'INVALID_GIVEN':
self.register_task_failure(task, repo_id, "INVALID_GIVEN: Not a github/gitlab/git url.")
return
# Add to history table
task_history = {
'repo_id': repo_id,
'worker': self.config['id'],
'job_model': model,
'oauth_id': self.oauths[0]['oauth_id'],
'timestamp': datetime.datetime.now(),
'status': "Success",
'total_results': self.results_counter
}
self.helper_db.execute(self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id).values(task_history))
self.logger.info(f"Recorded job completion for: {task_completed}\n")
# Update job process table
updated_job = {
'since_id_str': repo_id,
'last_count': self.results_counter,
'last_run': datetime.datetime.now(),
'analysis_state': 0
}
self.helper_db.execute(self.worker_job_table.update().where(
self.worker_job_table.c.job_model==model).values(updated_job))
self.logger.info(f"Updated job process for model: {model}\n")
if self.config['offline_mode'] is False:
# Notify broker of completion
self.logger.info(f"Telling broker we completed task: {task_completed}\n")
self.logger.info(f"This task inserted: {self.results_counter + self.insert_counter} tuples " +
f"and updated {self.update_counter} tuples.\n")
requests.post('http://{}:{}/api/unstable/completed_task'.format(
self.config['host_broker'],self.config['port_broker']), json=task_completed)
# Reset results counter for next task
self.results_counter = 0
self.insert_counter = 0
self.update_counter = 0
def register_task_failure(self, task, repo_id, e):
self.logger.error(f"Worker ran into an error for task: {task}")
self.logger.error(
f"Worker was processing this task for {self.collection_start_time - time.time()} "
"seconds."
)
self.logger.error("Printing traceback...")
self.logger.error(e)
tb = traceback.format_exc()
self.logger.error(tb)
self.logger.info(f"This task inserted {self.results_counter} tuples before failure.")
self.logger.info("Notifying broker and logging task failure in database...")
key = (
'github_url' if 'github_url' in task['given'] else 'git_url'
if 'git_url' in task['given'] else 'gitlab_url'
if 'gitlab_url' in task['given'] else 'INVALID_GIVEN'
)
url = task['given'][key]
""" Query all repos with repo url of given task """
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(url))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
task['worker_id'] = self.config['id']
try:
requests.post("http://{}:{}/api/unstable/task_error".format(
self.config['host_broker'],self.config['port_broker']), json=task)
except requests.exceptions.ConnectionError:
self.logger.error("Could not send task failure message to the broker:")
self.logger.error(e)
except Exception:
self.logger.error("An error occured while informing broker about task failure:")
self.logger.error(e)
# Add to history table
task_history = {
"repo_id": repo_id,
"worker": self.config['id'],
"job_model": task['models'][0],
"oauth_id": self.oauths[0]['oauth_id'],
"timestamp": datetime.datetime.now(),
"status": "Error",
"total_results": self.results_counter
}
self.helper_db.execute(
self.worker_history_table.update().where(
self.worker_history_table.c.history_id==self.history_id
).values(task_history)
)
self.logger.error(f"Recorded job error in the history table for: {task}")
# Update job process table
updated_job = {
"since_id_str": repo_id,
"last_count": self.results_counter,
"last_run": datetime.datetime.now(),
"analysis_state": 0
}
self.helper_db.execute(
self.worker_job_table.update().where(
self.worker_job_table.c.job_model==task['models'][0]
).values(updated_job)
)
self.logger.info(f"Updated job process for model: {task['models'][0]}\n")
# Reset results counter for next task
self.results_counter = 0
def get_relevant_columns(self, table, action_map={}):
columns = copy.deepcopy(action_map['update']['augur']) if 'update' in action_map else []
columns += action_map['value_update']['augur'] if 'value_update' in action_map else []
columns += action_map['insert']['augur'] if 'insert' in action_map else []
return [table.c[column] for column in
columns + [list(table.primary_key)[0].name]]
def retrieve_tuple(self, key_values, tables):
table_str = tables[0]
del tables[0]
key_values_items = list(key_values.items())
for col, value in [key_values_items[0]]:
where_str = col + " = '" + value + "'"
del key_values_items[0]
for col, value in key_values_items:
where_str += ' AND ' + col + " = '" + value + "'"
for table in tables:
table_str += ", " + table
retrieveTupleSQL = s.sql.text("""
SELECT * FROM {} WHERE {}
""".format(table_str, where_str))
values = json.loads(
pd.read_sql(retrieveTupleSQL, self.db, params={}).to_json(orient="records")
)
return values
def update_gitlab_rate_limit(self, response, bad_credentials=False, temporarily_disable=False):
        # Try to get the rate limit from the response headers; sometimes that fails,
        # in which case we just decrement the last received header count
if bad_credentials and len(self.oauths) > 1:
self.logger.info(
f"Removing oauth with bad credentials from consideration: {self.oauths[0]}"
)
del self.oauths[0]
if temporarily_disable:
self.logger.info("Gitlab rate limit reached. Temp. disabling...")
self.oauths[0]['rate_limit'] = 0
else:
try:
self.oauths[0]['rate_limit'] = int(response.headers['RateLimit-Remaining'])
except:
self.oauths[0]['rate_limit'] -= 1
self.logger.info("Updated rate limit, you have: " +
str(self.oauths[0]['rate_limit']) + " requests remaining.")
if self.oauths[0]['rate_limit'] <= 0:
try:
reset_time = response.headers['RateLimit-Reset']
except Exception as e:
self.logger.info(f"Could not get reset time from headers because of error: {e}")
reset_time = 3600
time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now()
self.logger.info("Rate limit exceeded, checking for other available keys to use.")
# We will be finding oauth with the highest rate limit left out of our list of oauths
new_oauth = self.oauths[0]
# Endpoint to hit solely to retrieve rate limit information from headers of the response
url = "https://gitlab.com/api/v4/version"
other_oauths = self.oauths[0:] if len(self.oauths) > 1 else []
for oauth in other_oauths:
# self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth))
self.headers = {"PRIVATE-TOKEN" : oauth['access_token']}
response = requests.get(url=url, headers=self.headers)
oauth['rate_limit'] = int(response.headers['RateLimit-Remaining'])
oauth['seconds_to_reset'] = (
datetime.datetime.fromtimestamp(
int(response.headers['RateLimit-Reset'])
) - datetime.datetime.now()
).total_seconds()
# Update oauth to switch to if a higher limit is found
if oauth['rate_limit'] > new_oauth['rate_limit']:
self.logger.info(f"Higher rate limit found in oauth: {oauth}")
new_oauth = oauth
elif (
oauth['rate_limit'] == new_oauth['rate_limit']
and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']
):
self.logger.info(
f"Lower wait time found in oauth with same rate limit: {oauth}"
)
new_oauth = oauth
if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0:
self.logger.info(
"No oauths with >0 rate limit were found, waiting for oauth with "
f"smallest wait time: {new_oauth}\n"
)
time.sleep(new_oauth['seconds_to_reset'])
# Make new oauth the 0th element in self.oauths so we know which one is in use
index = self.oauths.index(new_oauth)
self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0]
self.logger.info("Using oauth: {}\n".format(self.oauths[0]))
# Change headers to be using the new oauth's key
self.headers = {"PRIVATE-TOKEN" : self.oauths[0]['access_token']}
def update_gh_rate_limit(self, response, bad_credentials=False, temporarily_disable=False):
        # Try to get the rate limit from the response headers; sometimes that fails (a GitHub
        # quirk), in which case we just decrement the last received header count
if bad_credentials and len(self.oauths) > 1:
self.logger.warning(
f"Removing oauth with bad credentials from consideration: {self.oauths[0]}"
)
del self.oauths[0]
if temporarily_disable:
self.logger.debug(
"Github thinks we are abusing their api. Preventing use "
"of this key until its rate limit resets..."
)
self.oauths[0]['rate_limit'] = 0
else:
try:
self.oauths[0]['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
# self.logger.info("Recieved rate limit from headers\n")
except:
self.oauths[0]['rate_limit'] -= 1
self.logger.info("Headers did not work, had to decrement")
self.logger.info(
f"Updated rate limit, you have: {self.oauths[0]['rate_limit']} requests remaining."
)
if self.oauths[0]['rate_limit'] <= 0:
try:
reset_time = response.headers['X-RateLimit-Reset']
except Exception as e:
self.logger.error(f"Could not get reset time from headers because of error: {e}")
reset_time = 3600
time_diff = datetime.datetime.fromtimestamp(int(reset_time)) - datetime.datetime.now()
self.logger.info("Rate limit exceeded, checking for other available keys to use.")
# We will be finding oauth with the highest rate limit left out of our list of oauths
new_oauth = self.oauths[0]
# Endpoint to hit solely to retrieve rate limit information from headers of the response
url = "https://api.github.com/users/gabe-heim"
other_oauths = self.oauths[0:] if len(self.oauths) > 1 else []
for oauth in other_oauths:
# self.logger.info("Inspecting rate limit info for oauth: {}\n".format(oauth))
self.headers = {'Authorization': 'token %s' % oauth['access_token']}
attempts = 3
success = False
while attempts > 0 and not success:
response = requests.get(url=url, headers=self.headers)
try:
oauth['rate_limit'] = int(response.headers['X-RateLimit-Remaining'])
oauth['seconds_to_reset'] = (
datetime.datetime.fromtimestamp(
int(response.headers['X-RateLimit-Reset'])
) - datetime.datetime.now()
).total_seconds()
success = True
except Exception as e:
self.logger.info(
f"oath method ran into error getting info from headers: {e}\n"
)
self.logger.info(f"{self.headers}\n{url}\n")
attempts -= 1
if not success:
continue
# Update oauth to switch to if a higher limit is found
if oauth['rate_limit'] > new_oauth['rate_limit']:
self.logger.info("Higher rate limit found in oauth: {}\n".format(oauth))
new_oauth = oauth
elif (
oauth['rate_limit'] == new_oauth['rate_limit']
and oauth['seconds_to_reset'] < new_oauth['seconds_to_reset']
):
self.logger.info(
f"Lower wait time found in oauth with same rate limit: {oauth}\n"
)
new_oauth = oauth
if new_oauth['rate_limit'] <= 0 and new_oauth['seconds_to_reset'] > 0:
self.logger.info(
"No oauths with >0 rate limit were found, waiting for oauth with "
f"smallest wait time: {new_oauth}\n"
)
time.sleep(new_oauth['seconds_to_reset'])
# Make new oauth the 0th element in self.oauths so we know which one is in use
index = self.oauths.index(new_oauth)
self.oauths[0], self.oauths[index] = self.oauths[index], self.oauths[0]
self.logger.info("Using oauth: {}\n".format(self.oauths[0]))
# Change headers to be using the new oauth's key
self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}
def update_rate_limit(
self, response, bad_credentials=False, temporarily_disable=False, platform="gitlab"
):
if platform == 'gitlab':
return self.update_gitlab_rate_limit(
response, bad_credentials=bad_credentials, temporarily_disable=temporarily_disable
)
elif platform == 'github':
return self.update_gh_rate_limit(
response, bad_credentials=bad_credentials, temporarily_disable=temporarily_disable
)
| mit |
hainm/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
danielTsky/ml-cipher-cracker | bigram_model-Copy0 (1).py | 3 | 10635 |
# coding: utf-8
# In[15]:
from __future__ import division
import numpy as np
import math
import matplotlib.pyplot as plt
import random
from numpy.random import rand
# read text
# In[1]:
def read_text_words(filename, wordsnumber):
    # corpus files are expected to hold one word per line; keep only the first
    # `wordsnumber` words and mark word boundaries with '{' (chr(123))
    with open(filename) as f:
        X = f.readlines()
    X = X[:wordsnumber]
    X = ''.join(X)
    X = X.replace('\n', '{') #123
    return X
def read_text_whole(filename):
with open(filename) as f:
X = f.read()
X = X.replace('\n', '{') #123
return X
def chop_text_to_size(text, size):
return text[:1024*1024*size]
def read_text_filesize(filename, size):
with open(filename) as f:
X = f.read(1024*1024*size)
X = X.replace('\n', '{') #123
return X
# counts
# In[3]:
def get_unicount(text):
length = len(text)
counts = np.zeros(27)
for i in xrange(length):
c = ord(text[i])
counts[c-97]+=1
#97-122, 123 - word delimiter
return counts[:26]
# bigram statistics
# In[4]:
def get_bigram_stats_dic(text):
length = len(text)
dic = {}
for i in xrange(length-1):
if ord(text[i]) == 123 or ord(text[i+1]) == 123:
continue
if (text[i], text[i+1]) in dic:
dic[(text[i], text[i+1])] += 1
else:
dic[(text[i], text[i+1])] = 1
    # normalize raw bigram counts into relative frequencies
    total = float(sum(dic.values()))
    for k, v in dic.items():
        dic[k] = v / total
return dic
# quality
# In[5]:
def quality(decrypted, original):
    # fraction of symbols that differ between the decrypted and the original text
    l = len(decrypted)
    zipped = zip(decrypted, original)
    return sum(1 for x, y in zipped if x != y) / l
# crypt
# In[6]:
def crypt(text):
p = range(26)
random.shuffle(p)
output=''
p.append(26)
for ch in text:
try:
x = ord(ch) - ord('a')
output+=(chr(p[x] + ord('a')))
except:
pass
return output, p
# metropolis and density maximization
# In[68]:
# from random import random
"""
This module implements the Metropolis-Hastings algorithm
for random variable generation.
The algorithm generates random variables from a desired
distribution (which may be unnormalized).
"""
def metropolis( desiredPDF, initValue, computableRVS, skipIterations = 200 ):
"""
This function returns a generator, which generates random variables
from some space S with a desired distribution using Metropolis-Hastings
algorithm.
Args:
desiredPDF (func) : PDF of desired distribution p( T ), where T from S
initValue : an object from S to initialize the starting point
of iterative proccess
computableRVS (func) : a generator of random value from space S
with given parameter T, which is also from S
skipIterations (int) : number of iterations to skip
                              (skipping more iterations leads to better accuracy
                              but greater time consumption)
Returns: generator, which produce some values from S
and their denisity according to distribution desiredPDF
"""
random_variable = initValue
random_variableDensityValue = desiredPDF( random_variable )
"""
A state of MCMC
"""
    #ignore the first iterations to let the iterative process
    #converge to a distribution close to the desired one
for i in xrange( skipIterations ):
candidate = computableRVS( random_variable )
print candidate
candidateDensityValue = desiredPDF( candidate )
"""
next candidate for sample, generated by computableRVS
"""
        # acceptanceProb = min( 1, candidateDensityValue / random_variableDensityValue )
        # desiredPDF returns log-density here, so the probability ratio becomes a difference:
        # min(0, log p(candidate) - log p(current)) == log(min(1, p(candidate) / p(current)))
acceptanceProb = min( 0, candidateDensityValue - random_variableDensityValue )
"""
probability to accept candidate to sample
"""
# acceptanceProb = math.exp(acceptanceProb)
print acceptanceProb
if math.log(random.random()) < acceptanceProb:
random_variable = candidate
random_variableDensityValue = candidateDensityValue
#now when the procces is converged to desired distribution,
#return acceptable candidates
print "-----"
while True:
candidate = computableRVS( random_variable )
print candidate
candidateDensityValue = desiredPDF( candidate )
"""
next candidate for sample, generated by computableRVS
"""
        # acceptanceProb = min( 1, candidateDensityValue / random_variableDensityValue )
        # desiredPDF returns log-density here, so the probability ratio becomes a difference:
        # min(0, log p(candidate) - log p(current)) == log(min(1, p(candidate) / p(current)))
acceptanceProb = min( 0, candidateDensityValue - random_variableDensityValue )
"""
probability to accept candidate to sample
"""
print acceptanceProb
# acceptanceProb = math.exp(acceptanceProb)
if math.log(random.random()) < acceptanceProb:
random_variable = candidate
random_variableDensityValue = candidateDensityValue
yield random_variable, random_variableDensityValue
def densityMaximization( desiredPDF, initValue, computableRVS, skipIterations = 200 ):
"""
    This function returns a generator, which generates random variables
    from some space S by trying to maximize the given density.
    The algorithm is a modification of Metropolis-Hastings.
    It rejects all candidates which decrease the density.
Args:
desiredPDF (func) : PDF of desired distribution p( T ), where T from S
initValue : an object from S to initialize the starting point
of iterative proccess
computableRVS (func) : a generator of random value from space S
with given parameter T, which is also from S
skipIterations (int) : number of iterations to skip
                              (skipping more iterations leads to better accuracy
                              but greater time consumption)
Returns: generator, which produce some values from S,
where each next value has no less density, and their denisity
"""
random_variable = initValue
random_variableDensityValue = desiredPDF( random_variable )
"""
A state of MCMC
"""
    #ignore the first iterations to let the iterative process enter
    #the high density regions
for i in xrange( skipIterations ):
candidate = computableRVS( random_variable )
candidateDensityValue = desiredPDF( candidate )
"""
next candidate for sample, generated by computableRVS
"""
if random_variableDensityValue < candidateDensityValue:
print candidate
print candidateDensityValue
random_variable = candidate
random_variableDensityValue = candidateDensityValue
#now when the procces is in high density regions,
#return acceptable candidates
while True:
candidate = computableRVS( random_variable )
candidateDensityValue = desiredPDF( candidate )
"""
next candidate for sample, generated by computableRVS
"""
if random_variableDensityValue < candidateDensityValue:
print candidate
print candidateDensityValue
random_variable = candidate
random_variableDensityValue = candidateDensityValue
yield random_variable, random_variableDensityValue
# permutation generator and computablervs
# In[8]:
"""
This module provide some functions,
that generate random permutations with different distributions.
There are a uniform distribution and a symmetric distribution,
which depends on some other permutation.
"""
def uniform( n ):
"""
Generates random permutation using Knuth algorithm.
Args:
n (int) : length of permutation
Returns: random permutation of length n from uniform distribution
"""
    #initialize with the identity permutation
    permutation = [ i for i in xrange( n ) ]
    #swap the ith object with a random object from i to n - 1 inclusive
for i in xrange( n ):
j = random.randint( i, n - 1 )
permutation[ i ], permutation[ j ] = permutation[ j ], permutation[ i ]
permutation.append(26)
return permutation
def applyedTranspostions( basePermutation ):
"""
    This function returns a random permutation obtained by applying random
    transpositions to the given permutation.
    The resulting proposal distribution is not uniform, but it is symmetric in its
    parameter (q(a | b) == q(b | a)), as Metropolis-Hastings requires.
Args:
basePermutation (array) : parameter of distribution
Returns: random permutation generated from basePermutation
"""
n = len( basePermutation) -1
"""
length of permutation
"""
#apply n random transpositions (including identical) to base permutation
for i in xrange( n ):
        k, l = random.randint( 0, n - 1 ), random.randint( 0, n - 1 )
basePermutation[ k ], basePermutation[ l ] = basePermutation[ l ], basePermutation[ k ]
return basePermutation
# desiredPDF
# In[19]:
def get_desiredPDF_bigram(permutation):
logp = 0
for i in xrange(len(encrypted)-1):
if (chr(permutation[ord(encrypted[i])-97]+97),
chr(permutation[ord(encrypted[i+1])-97]+97)) in stats.keys():
logp += math.log(stats[(chr(permutation[ord(encrypted[i])-97]+97),
chr(permutation[ord(encrypted[i+1])-97]+97))])
return logp
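# get_desiredPDF_bigram scores a candidate permutation by the log-likelihood of the decrypted
# text under the training bigram frequencies; bigrams unseen in training are skipped, i.e.
# they contribute log(1) = 0 to the sum.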
## Varying training text size
# Fix large (e.g. 5000 or more words) encrypted text and explore how the ratio of correctly decrypted symbols
# depends on the size of training text (using the same number of MCMC iterations)
## TO BE DELETED
# In[13]:
#TEST TEXT
fname = 'main/oliver_twist.txt'
original = read_text_words(fname, 1000)
encrypted, p = crypt(original)
#TRAIN TEXT
length = 575514
train_text = read_text_words('main/war_and_peace.txt', length)
counts = get_unicount(train_text)
stats = get_bigram_stats_dic(train_text)
print p
# In[69]:
computableGen = lambda t: applyedTranspostions(t)
init_p = uniform(26)
metropolisgenerator = metropolis(get_desiredPDF_bigram, init_p, computableGen )
# densityMaximization(get_desiredPDF_bigram, init_p, computableGen )
x = []
for i in xrange( 10 ):
x.append( metropolisgenerator.next()[0] )
# In[65]:
for i in x:
print i
# In[62]:
per = x[0]
for i in xrange(len(per)):
print (ord('a') + i) == (ord('a') + per[p[i]])
# In[ ]:
| mit |
marcusrehm/serenata-de-amor | rosie/rosie/chamber_of_deputies/tests/test_adapter.py | 1 | 2441 | import os
import shutil
from shutil import copy2
from tempfile import mkdtemp
from unittest import TestCase
from unittest.mock import patch
import pandas as pd
from rosie.chamber_of_deputies.adapter import COLUMNS as ADAPTER_COLUMNS
from rosie.chamber_of_deputies.adapter import Adapter as subject_class
class TestAdapter(TestCase):
def setUp(self):
self.temp_path = mkdtemp()
self.fixtures_path = os.path.join('rosie', 'chamber_of_deputies', 'tests', 'fixtures')
copies = (
('companies.xz', subject_class.COMPANIES_DATASET),
('reimbursements.xz', 'reimbursements.xz')
)
for source, target in copies:
copy2(os.path.join(self.fixtures_path, source), os.path.join(self.temp_path, target))
self.subject = subject_class(self.temp_path)
def tearDown(self):
shutil.rmtree(self.temp_path)
@patch('rosie.chamber_of_deputies.adapter.Dataset')
@patch('rosie.chamber_of_deputies.adapter.fetch')
def test_get_performs_a_left_merge_between_reimbursements_and_companies(self, fetch, chamber_of_deputies):
self.assertEqual(6, len(self.subject.dataset))
self.assertEqual(1, self.subject.dataset['legal_entity'].isnull().sum())
@patch('rosie.chamber_of_deputies.adapter.Dataset')
@patch('rosie.chamber_of_deputies.adapter.fetch')
def test_prepare_dataset(self, fetch, chamber_of_deputies):
"""
* Rename columns.
* Make `document_type` a category column.
* Rename values for `category`.
* Create `is_party_expense` column.
"""
dataset = self.subject.dataset
self.assertTrue(set(ADAPTER_COLUMNS.keys()).issubset(set(dataset.columns)))
document_types = ['bill_of_sale', 'simple_receipt', 'expense_made_abroad']
self.assertEqual(document_types,
dataset['document_type'].cat.categories.tolist())
fixture = pd.read_csv(os.path.join(self.fixtures_path, 'reimbursements.xz'))
meal_rows = fixture \
.query('subquota_description == "Congressperson meal"').index
self.assertEqual(['Meal'],
dataset.loc[meal_rows, 'category'].unique().tolist())
party_expense_rows = fixture[fixture['congressperson_id'].isnull()].index
self.assertEqual([True],
dataset.loc[party_expense_rows, 'is_party_expense'].unique().tolist())
| mit |
samfpetersen/gnuradio | gr-filter/examples/channelize.py | 58 | 7003 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
self._ifs = M*self._fs # initial sampling rate
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = blocks.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in xrange(len(freqs)):
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
vstarostenko/soteria | attack_similiarity.py | 1 | 2168 | #! /usr/bin/env python
from __future__ import division
from mrjob.job import MRJob
from itertools import combinations
# from sklearn.metrics import jaccard_similarity_score
import numpy as np
import sys
class AttackSimilarity(MRJob):
# INPUT_PROTOCOL = JSONValueProtocol
def extract_incident(self, _, line):
record = line.split(',')
# print record
if record[0] != 'incident_id':
feature = record[1:]
incident = record[0]
yield incident, list(feature)
def combine_incident(self, incident, feature):
allfeatures = list(feature)
yield incident, list(allfeatures[0])
def distribute_incident(self, incd, incdfeat):
yield "all" , [incd, list(incdfeat)]
def similar_incident(self, _, allincidents):
for (inc_a, feat_a), (inc_b, feat_b) in combinations(list(allincidents), r=2):
feat_a_array = np.array(feat_a, dtype='int')
feat_b_array = np.array(feat_b, dtype='int')
# similarity = jaccard_similarity_score(feat_a_array, feat_b_array)
feat_a_mag = np.sqrt(np.dot(feat_a_array, feat_a_array))
            feat_b_mag = np.sqrt(np.dot(feat_b_array, feat_b_array))
similarity = float(np.dot(feat_a_array, feat_b_array))/ (feat_a_mag * feat_b_mag)
sys.stderr.write("Similarity: ({0},{1})\n".format([inc_a, inc_b],similarity))
if similarity >= 0.99 :
yield [inc_a, inc_b], similarity
def steps(self):
"""
MapReduce Steps:
extract_incident : <_, line> => <incident, feature>
combine_incident : <incident, [feature]> => <incident, allfeatures>
map_incident : <incident, [incedentfeatures] => <"all", [[incident, features]]
reduce_incident : <_, allincidents> => <[incident_pairs], similarity>
"""
return [
self.mr(mapper=self.extract_incident, reducer=self.combine_incident),
self.mr(mapper=self.distribute_incident, reducer=self.similar_incident)
]
if __name__ == '__main__':
AttackSimilarity.run()
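# Usage sketch (mrjob's standard CLI; the CSV layout is an assumption based on
# extract_incident, which expects rows of: incident_id,feature_1,...,feature_N):
#   python attack_similiarity.py incidents.csv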
| apache-2.0 |
d-chambers/animations | simplex/simplex.py | 1 | 13205 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 2 13:12:55 2017
@author: isti_ew
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Polygon
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PatchCollection
# import seaborn as sns
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
STARTING_POINTS = np.array([[-.25, -.25], [0, 1], [1, 0]])
#STARTING_POINTS = np.array([[-2, -2], [-1, -1], [-1, -.5]])
STARTING_VALUES = np.array([1, 0, 0])
# STARTING_VALUES
def dummy_func(point):
if point in STARTING_POINTS:
ind = [num for num, x in enumerate(STARTING_POINTS)
if np.all(x == point)]
return STARTING_VALUES[ind[0]]
return 2
def func(point):
""" The Rosenbrock function with a=0, b=1:
https://en.wikipedia.org/wiki/Rosenbrock_function """
x = point[0]
y = point[1]
return x ** 2 + (y - x ** 2) ** 2
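# Note: this variant attains its global minimum of 0 at (x, y) = (0, 0); PlotPlex3D
# below locates the (approximate) minimum on a mesh grid and marks it with a black dot.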
class cache:
""" A property like cache """
def __init__(self, method):
self.method = method
def __set_name__(self, owner, name):
self.name = name
def __get__(self, instance, owner):
assert hasattr(instance, 'cache')
if self.name not in instance.cache:
instance.cache[self.name] = self.method(instance)
return instance.cache[self.name]
class Simplex:
""" class to capture behavior of simplex for graphing """
alpha = 2 # reflection coef.
gamma = 4 # expansion coef.
rho = 0.5 # contraction coef.
sigma = 0.5 # shrink coef
def __init__(self, points, values_0=None, func=None):
# asserts and setups
assert values_0 is not None or func is not None
if values_0 is None: # evaluate
values_0 = np.array([func(x) for x in points])
assert len(points) == len(values_0)
# value place-holder
self.cvalues_ = values_0.astype(float)
self.cpoints = points.astype(float) # current points
self.ppoints = points.astype(float) # previous points
self.last_move = None
self.func = func
self.p_min_values = np.min(self.cvalues_)
self.cache = {}
# ---------------------------- cached properties
@cache
def sorted_cvalues(self):
return self.cvalues_[np.argsort(self.cvalues_)]
@cache
def sorted_values(self):
return np.sort(self.cvalues)
@cache
def cvalues(self):
return np.apply_along_axis(self.func, 1, self.cpoints)
@cache
def ccentroid(self):
return calculate_centroid(self.cpoints)
@cache
def pcentroid(self):
return calculate_centroid(self.ppoints)
@cache
def max_vertex_index(self):
return np.argmax(self.cvalues)
@cache
def max_vertex(self):
return self.cpoints[self.max_vertex_index]
@cache
def max_value(self):
return self.func(self.max_vertex)
@cache
def min_vertex_value(self):
return np.argmin(self.cvalues)
@cache
def min_vertex(self):
return self.cpoints[self.min_vertex_value]
@cache
def min_value(self):
return self.func(self.min_vertex)
@cache
def reflection(self):
""" get reflected version of triangle """
return self.ccentroid + (self.ccentroid - self.max_vertex) * self.alpha
@cache
def reflection_value(self):
return self.func(self.reflection)
@cache
def expansion(self):
""" get the expansion point """
return self.ccentroid + (self.reflection - self.ccentroid) * self.gamma
@cache
def expansion_value(self):
return self.func(self.expansion)
@cache
def contraction(self):
""" get contraction point """
return self.ccentroid + (self.max_vertex - self.ccentroid) * self.rho
@cache
def contraction_value(self):
return self.func(self.contraction)
@cache
def shrink(self):
""" get shrunken simplex """
ar = np.copy(self.cpoints)
lowest_ind = np.argmin(self.cvalues_)
for num, subar in enumerate(ar):
if num == lowest_ind:
continue
ar[num] = self.min_vertex + self.sigma * (subar - self.min_vertex)
return ar
# -------------------------------- methods
def update_highest_point(self, new_point):
ar = np.copy(self.cpoints)
ar[int(np.argmax(self.cvalues)), :] = new_point
assert np.all(ar[np.argmax(self.cvalues)] == new_point)
return ar
def _update(self, move):
assert move in {'reflection', 'expansion', 'contraction', 'shrink'}
self.ppoints = self.cpoints
self.last_move = move
new_val = getattr(self, move)
if move == 'shrink':
self.cpoints = new_val
else:
self.cpoints = self.update_highest_point(new_val)
self.p_min_values = self.min_value
def iterate(self):
""" run an iteration of simplex """
assert self.func, 'function must be defined in order to run simplex'
# clear cache
self.cache = {}
# run algorithm
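        # Nelder-Mead style choice: keep the reflected point when it is neither best
        # nor worst, expand when reflection beats the current best, contract when
        # reflection is still worse than the current worst, and shrink as a last resort.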
assert self.reflection not in self.cpoints
if self.min_value < self.reflection_value < self.max_value:
self._update('reflection')
elif self.reflection_value < self.min_value:
assert self.expansion not in self.cpoints
if self.expansion_value < self.reflection_value:
self._update('expansion')
else:
self._update('reflection')
else:
assert self.reflection_value > self.max_value
assert self.contraction_value not in self.cpoints
if self.contraction_value < self.max_value:
self._update('contraction')
else:
self._update('shrink')
# ------------------------------------- simplex auxiliary methods
def to_polygon(points, **kwargs):
""" given points, wrap them in a closed polygon """
return Polygon(points, closed=True, **kwargs)
def calculate_centroid(array):
""" given an array of vertices calculate a centroid """
return array.mean(axis=0)
# ------------------------- matplot lib plotting stuff
def frame_by_frame(points_1, points_2, num=25):
""" Given two sets of points that represent polygons, return a new
ndarray (with shape = shape(points_1) + (num)) that extrapolates between
each point linearly in order to pass to visualization to simulate
motion """
assert points_1.shape == points_2.shape
assert np.any(points_1 != points_2)
change_vector = (points_2 - points_1) / float(num)
out = np.zeros(tuple([int(num) + 1] + list(np.shape(points_1))))
for ind in range(int(num) + 1):
out[ind] = points_1 + (ind * change_vector)
return out
def make_path(path):
path = os.path.join(os.path.dirname(__file__), path)
if not os.path.exists(path):
os.mkdir(path)
class PlotPlex2D:
""" convenience class for plotting simplex """
def __init__(self):
self.simplex = Simplex(STARTING_POINTS, func=dummy_func)
def get_figure(self):
fig = plt.Figure(figsize=(1, 1))
return fig
def set_axis_limits(self, ax, seqs):
cent_x = self.simplex.ccentroid[0]
cent_y = self.simplex.ccentroid[1]
x_min = seqs[:, :, 0].min()
x_max = seqs[:, :, 0].max()
y_min = seqs[:, :, 1].min()
y_max = seqs[:, :, 1].max()
ax.set_xlim(x_min - .5, x_max + .5)
ax.set_ylim(y_min - .5, y_max + .5)
ax.set_xticklabels([])
ax.set_yticklabels([])
def plot_2d_reflection(self):
s1 = self.simplex.cpoints
s2 = self.simplex.update_highest_point(self.simplex.reflection)
self._plot_2D_sequence(r'Reflection ($\alpha = 2$)', s1, s2)
def plot_2d_expansion(self):
s1 = self.simplex.cpoints
s2 = self.simplex.update_highest_point(self.simplex.expansion)
self._plot_2D_sequence(r'Expansion ($\gamma = 4$)', s1, s2)
def plot_2d_contraction(self):
s1 = self.simplex.cpoints
s2 = self.simplex.update_highest_point(self.simplex.contraction)
self._plot_2D_sequence(r'Contraction ($\rho = 0.5$)', s1, s2)
def plot_2d_shrink(self):
s1 = self.simplex.cpoints
s2 = self.simplex.shrink
self._plot_2D_sequence(r'Shrink ($\sigma = 0.5$)', s1, s2)
def _plot_2D_sequence(self, path, s1, s2, ax=None):
make_path(path.split()[0])
seqs = frame_by_frame(s1, s2)
centroid = self.simplex.ccentroid
new_centroid = calculate_centroid(s2)
for num, seq in enumerate(seqs):
plt.show()
fig = self.get_figure()
plt.axes().set_aspect('equal')
ax = plt.gca()
poly = to_polygon(seq)
collection = PatchCollection([poly], alpha=.3, linewidth=2,
edgecolor='k')
ax.add_collection(collection)
# import pdb; pdb.set_trace()
ax.scatter([centroid[0]], [centroid[1]], color='k')
ax.scatter([new_centroid[0]], [new_centroid[1]], color='r')
self.set_axis_limits(ax, seqs)
plt.title(path, fontsize=20)
fin = f'{num:03d}.png'
file_name = os.path.join(path.split()[0], fin)
plt.savefig(file_name)
if num == len(seqs) - 1:
for num in range(num, num + len(seqs) // 2):
fin = f'{num:03d}.png'
file_name = os.path.join(path.split()[0], fin)
plt.savefig(file_name)
class PlotPlex3D:
xvals = np.linspace(-2, 2, 100)
yvals = np.linspace(-2, 2, 100)
def __init__(self, simplex):
self.simplex = simplex
self.func = simplex.func
assert self.func is not None, 'simplex must have function to optimize'
self.mesh = np.stack(np.meshgrid(self.xvals, self.yvals))
self.func_values = np.apply_along_axis(self.func, 0, self.mesh)
self.min_val = self.func_values.min()
self.min_val_ind = np.where(self.func_values == self.min_val)
self.min_x = self.xvals[self.min_val_ind[0]]
self.min_y = self.yvals[self.min_val_ind[1]]
def plot_func(self, ax=None):
""" plot the objective function """
ax = ax or Axes3D(plt.gcf())
ax.plot_surface(self.mesh[0], self.mesh[1], self.func_values,
alpha=0.6)
ax.scatter([self.min_x], [self.min_y], [self.min_val], color='k')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
plt.title('Reflection')
return ax
def plot_simplex(self, ax=None, verticies=None):
ax = ax or Axes3D(plt.gcf())
points = self.simplex.cpoints if verticies is None else verticies
values = np.apply_along_axis(self.func, 1, points)
# draw lines between points
lpoints = np.vstack([points, points[0]])
lvalues = np.append(values, values[0])
ax.plot(lpoints[:, 0], lpoints[:, 1], lvalues)
# draw scatter points on verticies
ax.scatter(points[:, 0], points[:, 1], values, color='r')
def plot_optimization(self, num_frames=8, num_iter=20):
count = 0
for _ in range(num_iter):
# run one round of optimization
make_path('Simplex')
self.simplex.iterate()
sequence = frame_by_frame(self.simplex.ppoints, self.simplex.cpoints,
num=num_frames)
for num, seq in enumerate(sequence):
plt.show()
f = plt.Figure(figsize=(9, 9))
f = plt.gcf() #Figure()
ax = f.add_subplot(111, projection='3d')
ax.view_init(elev=50)
self.plot_func(ax=ax)
self.plot_simplex(ax, verticies=seq)
plt.title(self.simplex.last_move.capitalize())
path = os.path.join('Simplex', f'{count:03d}.jpeg')
ax.view_init(elev=50)
plt.savefig(path, dpi=300)
count += 1
# plot pause
if num == len(sequence) - 1:
for _ in range(4):
path = os.path.join('Simplex', f'{count:03d}.jpeg')
ax.set_title('')
ax.view_init(elev=50)
plt.savefig(path, dpi=300)
count += 1
# ----------------------------- Run animations
make2d = 0
make3d = 1
if make2d: # make 2d stuff
pp = PlotPlex2D()
pp.plot_2d_reflection()
pp.plot_2d_expansion()
pp.plot_2d_contraction()
pp.plot_2d_shrink()
if make3d: # make 3d stuff
points = np.array([[-2, -2], [-1.95, -1.75], [-1.75, -1.95]])
simplex = Simplex(points, func=func)
pp = PlotPlex3D(simplex)
pp.plot_optimization()
# ffmpeg -r 12 -f image2 -s 1920x1080 -i %03d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p reflection.mp4
| mit |
depet/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 5 | 21877 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.validation import DataConversionWarning
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
"""Check input parameter validation."""
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
def test_classification_synthetic():
"""Test GradientBoostingClassifier on synthetic dataset used by
Hastie et al. in ESLII Example 12.7. """
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.085, \
"GB failed with error %.4f" % error_rate
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, \
"Stochastic GB failed with error %.4f" % error_rate
def test_boston():
"""Check consistency on dataset boston house prices with least squares
and least absolute deviation. """
for loss in ("ls", "lad", "huber"):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4,
min_samples_split=1, random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and mse = %.4f" % (loss, mse)
def test_iris():
"""Check consistency on dataset iris."""
for subsample in (1.0, 0.5):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
"""Test on synthetic regression datasets used in Leo Breiman,
    `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996). """
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability():
"""Predict probabilities."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
"""Test input checks (shape and type of X and y)."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
def test_check_inputs_predict():
"""X has wrong shape """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
"""test if max_features is valid. """
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
"""Test to make sure random state is set properly. """
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_staged_predict():
"""Test whether staged decision function eventually gives
the same prediction.
"""
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
"""Test whether staged predict proba eventually gives
the same prediction.
"""
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_serialization():
"""Check model serialization."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
"""Check if we can fit even though all targets are equal. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
"""Check if quantile loss with alpha=0.5 equals lad. """
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
"""Test with non-integer class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
"""Test with float class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
"""Test with float class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
with warnings.catch_warnings(record=True):
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
warnings.simplefilter("always", DataConversionWarning)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
"""Test with different memory layouts of X and y"""
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_score():
"""Test if oob_score is deprecated. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert_true(hasattr(clf, 'oob_score_'))
assert_equal(len(w), 1)
def test_oob_improvement():
"""Test if oob improvement has correct shape and regression test. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
"""Test if oob improvement has correct shape. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
"""Check OOB improvement on multi-class dataset."""
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
"""Check verbose=1 does not cause error. """
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
"""Check verbose=2 does not cause error. """
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warn_deviance():
"""Test if mdeviance and bdeviance give deprecated warning. """
for loss in ('bdeviance', 'mdeviance'):
with warnings.catch_warnings(record=True) as w:
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
warnings.simplefilter("always", DataConversionWarning)
clf = GradientBoostingClassifier(loss=loss)
try:
clf.fit(X, y)
except:
# mdeviance will raise ValueError because only 2 classes
pass
# deprecated warning for bdeviance and mdeviance
assert len(w) == 1
| bsd-3-clause |
murali-munna/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
IssamLaradji/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 26 | 13430 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed archive is around 14 MB; once uncompressed,
the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
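#
# A minimal usage sketch (illustrative only; the parameter values are examples):
#
#   from sklearn.datasets import fetch_20newsgroups
#   train = fetch_20newsgroups(subset='train',
#                              remove=('headers', 'footers', 'quotes'))
#   print(train.target_names)     # the newsgroup category names
#   print(train.filenames.shape)  # one cached post per entry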
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warn("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warn("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
open(archive_path, 'wb').write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
open(cache_path, 'wb').write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
data.description = 'the 20 newsgroups by date dataset'
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
| bsd-3-clause |
sinhrks/expandas | doc/source/conf.py | 3 | 8742 | # -*- coding: utf-8 -*-
#
# pandas-ml documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 22 14:58:37 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pandas_ml'
copyright = u'2015, sinhrks'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.0'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pandas_mldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pandas_ml.tex', u'pandas-ml Documentation',
u'sinhrks', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pandas_ml', u'pandas-ml Documentation',
[u'sinhrks'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pandas_ml', u'pandas-ml Documentation',
u'sinhrks', 'pandas_ml', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# -- Build API doc ----------------------------------------------------------
numpydoc_show_class_members = False
fpath = os.path.dirname(__file__)
gen_path = os.path.join(fpath, 'generated')
app_path = os.path.join(os.path.dirname(os.path.dirname(fpath)), 'pandas_ml')
os.system('sphinx-apidoc -f -E -o {0} {1}'.format(gen_path, app_path))
| bsd-3-clause |
28ideas/quant-econ | examples/perm_inc_figs.py | 1 | 1521 | """
Plots consumption, income and debt for the simple infinite horizon LQ
permanent income model with Gaussian iid income.
"""
from __future__ import division
import random
import numpy as np
import matplotlib.pyplot as plt
r = 0.05
beta = 1 / (1 + r)
T = 60
sigma = 0.15
mu = 1
def time_path():
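    # One simulated path: w holds iid income shocks, b_t = -sigma * sum_{s<t} w_s is
    # accumulated debt, and consumption follows c_t = mu + (1 - beta) * (sigma * w_t - b_t).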
w = np.random.randn(T+1) # w_0, w_1, ..., w_T
w[0] = 0
b = np.zeros(T+1)
for t in range(1, T+1):
b[t] = w[1:t].sum()
b = - sigma * b
c = mu + (1 - beta) * (sigma * w - b)
return w, b, c
# == Figure showing a typical realization == #
if 1:
fig, ax = plt.subplots()
p_args = {'lw' : 2, 'alpha' : 0.7}
ax.grid()
ax.set_xlabel(r'Time')
bbox = (0., 1.02, 1., .102)
legend_args = {'bbox_to_anchor' : bbox, 'loc' : 'upper left', 'mode' : 'expand'}
w, b, c = time_path()
ax.plot(range(T+1), mu + sigma * w, 'g-', label="non-financial income", **p_args)
ax.plot(range(T+1), c, 'k-', label="consumption", **p_args)
ax.plot(range(T+1), b, 'b-', label="debt", **p_args)
ax.legend(ncol=3, **legend_args)
plt.show()
# == Figure showing multiple consumption paths == #
if 0:
fig, ax = plt.subplots()
p_args = {'lw' : 0.8, 'alpha' : 0.7}
ax.grid()
ax.set_xlabel(r'Time')
ax.set_ylabel(r'Consumption')
b_sum = np.zeros(T+1)
for i in range(250):
rcolor = random.choice(('c', 'g', 'b', 'k'))
w, b, c = time_path()
ax.plot(range(T+1), c, color=rcolor, **p_args)
plt.show()
| bsd-3-clause |
jhnnsnk/nest-simulator | pynest/examples/gap_junctions_inhibitory_network.py | 7 | 5989 | # -*- coding: utf-8 -*-
#
# gap_junctions_inhibitory_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Gap Junctions: Inhibitory network example
-----------------------------------------------
This script simulates an inhibitory network of 500 Hodgkin-Huxley neurons.
Without the gap junctions (meaning for ``gap_weight = 0.0``) the network shows
an asynchronous irregular state that is caused by the external excitatory
Poissonian drive being balanced by the inhibitory feedback within the
network. With increasing `gap_weight` the network synchronizes:
For a lower gap weight of 0.3 nS the network remains in an asynchronous
state. With a weight of 0.54 nS the network switches randomly between the
asynchronous and the synchronous state, while for a gap weight of 0.7 nS a
stable synchronous state is reached.
This example is also used as test case 2 (see Figure 9 and 10)
in [1]_.
References
~~~~~~~~~~~
.. [1] Hahne et al. (2015) A unified framework for spiking and gap-junction
interactions in distributed neuronal network simulations, Front.
Neuroinform. http://dx.doi.org/10.3389/neuro.11.012.2008
"""
import nest
import matplotlib.pyplot as plt
import numpy
n_neuron = 500
gap_per_neuron = 60
inh_per_neuron = 50
delay = 1.0
j_exc = 300.
j_inh = -50.
threads = 8
stepsize = 0.05
simtime = 501.
gap_weight = 0.3
nest.ResetKernel()
###############################################################################
# First we set the random seed, adjust the kernel settings and create
# ``hh_psc_alpha_gap`` neurons, ``spike_recorder`` and ``poisson_generator``.
numpy.random.seed(1)
nest.SetKernelStatus({'resolution': 0.05,
'total_num_virtual_procs': threads,
'print_time': True,
# Settings for waveform relaxation
# 'use_wfr': False uses communication in every step
# instead of an iterative solution
'use_wfr': True,
'wfr_comm_interval': 1.0,
'wfr_tol': 0.0001,
'wfr_max_iterations': 15,
'wfr_interpolation_order': 3})
neurons = nest.Create('hh_psc_alpha_gap', n_neuron)
sr = nest.Create("spike_recorder")
pg = nest.Create("poisson_generator", params={'rate': 500.0})
###############################################################################
# Each neuron shall receive ``inh_per_neuron = 50`` inhibitory synaptic inputs
# that are randomly selected from all other neurons, each with synaptic
# weight ``j_inh = -50.0`` pA and a synaptic delay of 1.0 ms. Furthermore each
# neuron shall receive an excitatory external Poissonian input of 500.0 Hz
# with synaptic weight ``j_exc = 300.0`` pA and the same delay.
# The desired connections are created with the following commands:
conn_dict = {'rule': 'fixed_indegree',
'indegree': inh_per_neuron,
'allow_autapses': False,
'allow_multapses': True}
syn_dict = {'synapse_model': 'static_synapse',
'weight': j_inh,
'delay': delay}
nest.Connect(neurons, neurons, conn_dict, syn_dict)
nest.Connect(pg, neurons, 'all_to_all',
syn_spec={'synapse_model': 'static_synapse',
'weight': j_exc,
'delay': delay})
###############################################################################
# Then the neurons are connected to the ``spike_recorder`` and the initial
# membrane potential of each neuron is set randomly between -40 and -80 mV.
nest.Connect(neurons, sr)
neurons.V_m = nest.random.uniform(min=-80., max=-40.)
#######################################################################################
# Finally gap junctions are added to the network. :math:`(60*500)/2` ``gap_junction``
# connections are added randomly resulting in an average of 60 gap-junction
# connections per neuron. We must not use the ``fixed_indegree`` or
# ``fixed_outdegree`` functionality of ``nest.Connect()`` to create the
# connections, as ``gap_junction`` connections are bidirectional connections
# and we need to make sure that the same neurons are connected in both ways.
# This is achieved by creating the connections on the Python level with the
# `random` module of NumPy and connecting the neurons
# using the ``make_symmetric`` flag for ``one_to_one`` connections.
n_connection = int(n_neuron * gap_per_neuron / 2)
neuron_list = neurons.tolist()
connections = numpy.random.choice(neuron_list, [n_connection, 2])
for source_node_id, target_node_id in connections:
nest.Connect(nest.NodeCollection([source_node_id]),
nest.NodeCollection([target_node_id]),
{'rule': 'one_to_one', 'make_symmetric': True},
{'synapse_model': 'gap_junction', 'weight': gap_weight})
###############################################################################
# In the end we start the simulation and plot the spike pattern.
nest.Simulate(simtime)
times = sr.get('events', 'times')
spikes = sr.get('events', 'senders')
n_spikes = sr.n_events
hz_rate = (1000.0 * n_spikes / simtime) / n_neuron
plt.figure(1)
plt.plot(times, spikes, 'o')
plt.title('Average spike rate (Hz): %.2f' % hz_rate)
plt.xlabel('time (ms)')
plt.ylabel('neuron no')
plt.show()
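###############################################################################
# Optional, illustrative helper (ours, not part of the original example and
# not called above): a crude synchrony index for the asynchronous-vs-
# synchronous comparison described in the docstring. It bins the recorded
# population spike times and returns the Fano factor of the binned counts;
# values near 1 suggest an asynchronous state, while much larger values
# indicate synchronized population bursts. This is only a sketch, assuming
# `times` holds all recorded spike times in ms.
def population_fano_factor(spike_times, t_max, bin_ms=2.0):
    counts, _ = numpy.histogram(
        spike_times, bins=numpy.arange(0.0, t_max + bin_ms, bin_ms))
    return counts.var() / counts.mean()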
| gpl-2.0 |
homeslike/OpticalTweezer | scripts/p0.6_at0.1/vCOMhist.py | 28 | 1192 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import sys
# The run folder is passed as the first command line argument.
folder = "../output/runs/" + str(sys.argv[1])
for i in range(0, len(sys.argv)):
    print(str(i) + ": " + str(sys.argv[i]))

# Load the centre-of-mass velocity components, discarding the first
# 100 samples as equilibration.
vx = np.genfromtxt(folder + "/vCOMData.dat", usecols=0, skip_header=100)
vy = np.genfromtxt(folder + "/vCOMData.dat", usecols=1, skip_header=100)
vz = np.genfromtxt(folder + "/vCOMData.dat", usecols=2, skip_header=100)

# Histogram of the x component; vy and vz can be histogrammed the same way.
plt.hist(vx, bins=100)
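# Optional helper (ours, for illustration only; not called above): overlay a
# Gaussian built from the sample mean and standard deviation on a velocity
# histogram, which is what one would expect for a thermalised component.
# Assumes a matplotlib version that supports the `density` keyword of hist().
def overlay_gaussian(samples, bins=100):
    counts, edges, _ = plt.hist(samples, bins=bins, density=True, alpha=0.5)
    centers = 0.5 * (edges[:-1] + edges[1:])
    mu_fit, sigma_fit = samples.mean(), samples.std()
    pdf = np.exp(-0.5 * ((centers - mu_fit) / sigma_fit) ** 2) \
        / (sigma_fit * np.sqrt(2.0 * np.pi))
    plt.plot(centers, pdf, 'r-')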
plt.show()
| mit |
hgrif/incubator-airflow | airflow/www/views.py | 2 | 97479 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from past.builtins import basestring, unicode
import ast
import logging
import os
import pkg_resources
import socket
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
import math
import json
import bleach
from collections import defaultdict
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
abort, redirect, url_for, request, Markup, Response, current_app, render_template,
make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask_login import flash
from flask._compat import PY2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2 import escape
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField, validators)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import set_dag_run_state
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.www.validators import GreaterEqualThan
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(settings.DAGS_FOLDER)
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
PAGE_SIZE = conf.getint('webserver', 'page_size')
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=dag_id)
return Markup(
'<a href="{}">{}</a>'.format(url, dag_id))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
task_id = bleach.clean(m.task_id)
url = url_for(
'airflow.task',
dag_id=dag_id,
task_id=task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=dag_id,
root=task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.utcnow().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = ast.literal_eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
"""Decorator for views requiring data profiling access"""
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
"""
TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to
approximate the size of generated chart (otherwise the charts are tiny and unreadable
when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
charts, that is charts that take up space based on the size of the components within.
"""
return 600 + len(dag.tasks) * 10
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
if conf.getboolean('core', 'secure_mode'):
abort(404)
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {
"state": "ERROR",
"error": ""
}
# Processing templated fields
try:
args = ast.literal_eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sandbox = ImmutableSandboxedEnvironment()
sql = sandbox.from_string(chart.sql).render(**args)
label = sandbox.from_string(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
if conf.getboolean('core', 'secure_mode'):
abort(404)
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
def dag_stats(self):
ds = models.DagStat
session = Session()
ds.update()
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
def task_stats(self):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
session = Session()
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True)
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title, )
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
session = Session()
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
else:
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('core', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
try:
ti.task = dag.get_task(ti.task_id)
logs = handler.read(ti)
except AttributeError as e:
logs = ["Task log handler {} does not support read logs.\n{}\n" \
.format(task_log_reader, e.message)]
for i, log in enumerate(logs):
if PY2 and not isinstance(log, unicode):
logs[i] = log.decode('utf-8')
return self.render(
'airflow/ti_log.html',
logs=logs, dag=dag, title="Log by attempts", task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task):
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
no_failed_deps_result = [(
"Unknown",
dedent("""\
All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
- The scheduler is down or under heavy load<br/>
{}
<br/>
If this task instance does not start soon please contact your Airflow administrator for assistance."""
.format(
"- This task instance already ran and had it's state changed manually (e.g. cleared in the UI)<br/>"
if ti.state == State.NONE else "")))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
def xcom(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
session = Session()
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
try:
from airflow.executors import GetDefaultExecutor
from airflow.executors.celery_executor import CeleryExecutor
executor = GetDefaultExecutor()
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = datetime.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False):
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = dateutil.parser.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/dagrun_success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_success(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = dateutil.parser.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state(dag, execution_date, state=State.SUCCESS,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
if not dag:
flash("Cannot find DAG: {}".format(dag_id))
return redirect(origin)
if not task:
flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
return redirect(origin)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=True)
flash("Marked success on {} task instances".format(len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date,
DR.execution_date >= min_date)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
tid["start_date"] is not None):
d = datetime.utcnow() - dateutil.parser.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.utcnow().date()
DR = models.DagRun
drs = (
session.query(DR)
.filter_by(dag_id=dag_id)
.order_by(desc(DR.execution_date)).all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2), )
@expose('/duration')
@login_required
@wwwutils.action_logging
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
TF = models.TaskFail
ti_fails = (
session
.query(TF)
.filter(
TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all()
)
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$(function() {$( document ).trigger('chartload') })" +
cum_chart.htmlcontent[s_index:])
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@login_required
@wwwutils.action_logging
def tries(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
for task in dag.tasks:
y[task.task_id] = []
x[task.task_id] = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[ti.task_id].append(dttm)
y[ti.task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.utcnow()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.utcnow().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
for ti in tis:
end_date = ti.end_date if ti.end_date else datetime.utcnow()
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(end_date - ti.start_date)[:-4],
'status': ti.state,
'executionDate': ti.execution_date.isoformat(),
})
states = {ti.state: ti.state for ti in tis}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25 + 25,
}
session.commit()
session.close()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
def task_instances(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
# prevent XSS
form = escape(form)
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
out = str(request.files['file'].read())
d = json.loads(out)
except Exception:
flash("Missing file or syntax error.")
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
# restrict the dags shown if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search', None)
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
sql_query = session.query(DM)
if do_filter and owner_mode == 'ldapgroup':
sql_query = sql_query.filter(
~DM.is_subdag,
DM.is_active,
DM.owners.in_(current_user.ldap_groups)
)
elif do_filter and owner_mode == 'user':
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.user.username
)
else:
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active
)
# optionally filter out "paused" dags
if hide_paused:
sql_query = sql_query.filter(~DM.is_paused)
orm_dags = {dag.dag_id: dag for dag
in sql_query
.all()}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
# get a list of all non-subdag dags visible to everyone
# optionally filter out "paused" dags
if hide_paused:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag and not dag.is_paused]
else:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag]
# optionally filter to get only dags that the user should see
if do_filter and owner_mode == 'ldapgroup':
# only show dags owned by someone in @current_user.ldap_groups
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner in current_user.ldap_groups
}
elif do_filter and owner_mode == 'user':
# only show dags owned by @current_user.user.username
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner == current_user.user.username
}
else:
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
if arg_search_query:
lower_search_query = arg_search_query.lower()
# filter by dag_id
webserver_dags_filtered = {
dag_id: dag
for dag_id, dag in webserver_dags.items()
if (lower_search_query in dag_id.lower() or
lower_search_query in dag.owner.lower())
}
all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
if lower_search_query in dag.dag_id.lower() or
lower_search_query in dag.owners.lower()]) |
set(webserver_dags_filtered.keys()))
sorted_dag_ids = sorted(all_dag_ids)
else:
webserver_dags_filtered = webserver_dags
sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
start = current_page * dags_per_page
end = start + dags_per_page
num_of_all_dags = len(sorted_dag_ids)
page_dag_ids = sorted_dag_ids[start:end]
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
auto_complete_data = set()
for dag in webserver_dags_filtered.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owner)
for dag in orm_dags.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owners)
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags_filtered,
orm_dags=orm_dags,
hide_paused=hide_paused,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=start + 1,
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(current_page, num_of_pages,
search=arg_search_query,
showPaused=not hide_paused),
dag_ids_in_page=page_dag_ids,
auto_complete_data=auto_complete_data)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/', methods=['POST', 'GET'])
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.form.get('conn_id')
csv = request.form.get('csv') == "true"
sql = request.form.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = PAGE_SIZE
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
    Modifies the base ModelView class for non-edit, browse-only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
form_args = {
'pool': {
'validators': [
validators.DataRequired(),
]
}
}
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',
)
column_list = (
'label',
'conn_id',
'chart_type',
'owner',
'last_modified',
)
column_sortable_list = (
'label',
'conn_id',
'chart_type',
('owner', 'owner.username'),
'last_modified',
)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
        'x_is_date': (
            "Whether the X axis should be cast as a date field. Expect most "
            "intelligible date formats to get cast properly."
        ),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
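    # A hypothetical default_params value matching the description above (not
    # taken from the codebase) would be a dict literal that eval()s cleanly,
    # e.g. {"ds": "2015-01-01", "limit": "100"}, with keys matching the
    # {{ templated_fields }} used in the chart's SQL.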
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = datetime.utcnow()
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description',
)
form_args = {
'label': {
'validators': [
validators.DataRequired(),
],
},
'event_type': {
'validators': [
validators.DataRequired(),
],
},
'start_date': {
'validators': [
validators.DataRequired(),
],
},
'end_date': {
'validators': [
validators.DataRequired(),
GreaterEqualThan(fieldname='start_date'),
],
},
'reported_by': {
'validators': [
validators.DataRequired(),
],
}
}
column_list = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
)
column_default_sort = ("start_date", True)
column_sortable_list = (
'label',
('event_type', 'event_type.know_event_type'),
'start_date',
'end_date',
('reported_by', 'reported_by.username'),
)
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
# NOTE: For debugging / troubleshooting
# mv = KnowEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
list_template = 'airflow/variable_list.html'
def hidden_field_formatter(view, context, model, name):
if wwwutils.should_hide_value_for_key(model.key):
return Markup('*' * 8)
try:
return getattr(model, name)
except AirflowException:
return Markup('<span class="label label-danger">Invalid</span>')
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
column_default_sort = ('key', False)
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
form_args = {
'key': {
'validators': {
validators.DataRequired(),
},
},
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter,
}
# Default flask-admin export functionality doesn't handle serialized json
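    # e.g. a hypothetical export of two Variables, one holding JSON and one a
    # plain string, would yield a variables.json like:
    #   {"feature_flags": {"beta": true}, "owner_email": "someone@example.com"}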
@action('varexport', 'Export', None)
def action_varexport(self, ids):
V = models.Variable
session = settings.Session()
qry = session.query(V).filter(V.id.in_(ids)).all()
session.close()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
except:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
def on_form_prefill(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "XCom"
verbose_name_plural = "XComs"
form_columns = (
'key',
'value',
'execution_date',
'task_id',
'dag_id',
)
form_extra_fields = {
'value': StringField('Value'),
}
column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_display_actions = False
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
form_args = dict(
dag_id=dict(validators=[validators.DataRequired()])
)
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link)
@action('new_delete', "Delete", "Are you sure you want to delete selected records?")
def action_new_delete(self, ids):
session = settings.Session()
deleted = set(session.query(models.DagRun)
.filter(models.DagRun.id.in_(ids))
.all())
session.query(models.DagRun) \
.filter(models.DagRun.id.in_(ids)) \
.delete(synchronize_session='fetch')
session.commit()
dirty_ids = []
for row in deleted:
dirty_ids.append(row.dag_id)
models.DagStat.update(dirty_ids, dirty_only=False, session=session)
session.close()
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_dagrun_state(ids, State.SUCCESS)
@provide_session
def set_dagrun_state(self, ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.state = target_state
if target_state == State.RUNNING:
dr.start_date = datetime.utcnow()
else:
dr.end_date = datetime.utcnow()
session.commit()
models.DagStat.update(dirty_ids, session=session)
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_display_actions = False
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('job_id', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
page_size = PAGE_SIZE
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@provide_session
@action('clear',
lazy_gettext('Clear'),
lazy_gettext(
'Are you sure you want to clear the state of the selected task instance(s)'
' and set their dagruns to the running state?'))
def action_clear(self, ids, session=None):
try:
TI = models.TaskInstance
dag_to_tis = {}
for id in ids:
task_id, dag_id, execution_date = id.split(',')
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
dag = dagbag.get_dag(dag_id)
tis = dag_to_tis.setdefault(dag, [])
tis.append(ti)
for dag, tis in dag_to_tis.items():
models.clear_task_instances(tis, session, dag=dag)
session.commit()
flash("{0} task instances have been cleared".format(len(ids)))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to clear task instances', 'error')
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
count = len(ids)
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = dateutil.parser.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField, _extra=TextAreaField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
    # Used to customize the form; the form elements get rendered and the
    # results are stored in the extra field as JSON. All of these need to be
    # prefixed with extra__ and then the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file.
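    # For illustration (hypothetical values), a saved google_cloud_platform
    # connection ends up with model.extra JSON along the lines of:
    #   {"extra__google_cloud_platform__project": "my-project",
    #    "extra__google_cloud_platform__key_path": "/path/to/key.json"}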
form_extra_fields = {
'extra__jdbc__drv_path': StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),
'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': models.Connection._types
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key: formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
except:
pass
return fk is None
@classmethod
def is_secure(cls):
"""
        Used to decide whether to display a message in the Connection list view
        making it clear that passwords and the `extra` field can't be encrypted
        (i.e. when no fernet key / cryptography support is available).
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("apache-airflow")[0].version
except Exception as e:
airflow_version = None
logging.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
logging.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = PAGE_SIZE
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| apache-2.0 |
yamitzky/dotfiles | home/.ipython/profile_default/startup/01-function.py | 1 | 3117 | def ulen(li):
return len(set(li))
def set_style():
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
set_japanese_font()
def set_japanese_font():
import matplotlib as __matplotlib
font_path = '/Library/Fonts/Osaka.ttf'
font_prop = __matplotlib.font_manager.FontProperties(fname=font_path)
__matplotlib.rcParams['font.family'] = font_prop.get_name()
__matplotlib.rcParams['pdf.fonttype'] = 42
__matplotlib.rcParams['savefig.dpi'] = 200
__matplotlib.rcParams['mathtext.default'] = 'regular'
def import_ds():
global pd, np, plt, sns, zimpala, zhive
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import zutil.impala as zimpala
import zutil.hive as zhive
def nanargsort(a, axis=-1, kind='quicksort', order=None):
"""Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
import numpy as np
if np.any(np.isnan(a)):
raise Exception("The matrix contains NaN value")
else:
return np.argsort(a, axis, kind, order)
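# Minimal usage sketch (assuming numpy is importable as np):
#   >>> nanargsort(np.array([3.0, 1.0, 2.0]))
#   array([1, 2, 0])
#   >>> nanargsort(np.array([3.0, np.nan, 2.0]))   # raises Exception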
| mit |
ltalirz/asetk | scripts/cube-ex-plane.py | 2 | 1861 | #!/usr/bin/env python
import numpy as np
import argparse
from asetk.format.cube import Cube
# Define command line parser
parser = argparse.ArgumentParser(
description='Extracts plane from cube file.')
parser.add_argument('--version', action='version', version='%(prog)s 27.01.2014')
parser.add_argument(
'cube',
metavar='',
help='Cube file to be sliced.')
parser.add_argument(
'dir',
metavar='DIRECTION',
type=str,
help='Plane normal. May be "x", "y" or "z"')
parser.add_argument(
'index',
metavar='INDEX',
type=int,
help='Plane index.')
parser.add_argument(
'--plot',
action='store_true',
default=False,
help='Whether to plot plane.')
parser.add_argument(
'--replicate',
nargs='+',
metavar='LIST',
type=int,
default=None,
help='To replicate 3x along x and 2x along y, set --replicate 3 2.')
args = parser.parse_args()
print("Reading cube file {}".format(args.cube))
c = Cube.from_file(args.cube, read_data=True)
plane = c.get_plane(dir=args.dir, i=args.index,
return_object=True, replica=args.replicate)
outfile = '{f}.plane{i}.dat'.format(f=args.cube, i=args.index)
print("Writing plane data to {}".format(outfile))
np.savetxt(outfile, plane.data)
if args.plot:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure()
cax = plt.imshow(plane.imdata, extent=plane.extent, cmap='gray')
plt.xlabel('x [$\AA$]')
plt.ylabel('y [$\AA$]')
cbar = fig.colorbar(cax, format='%.2e')
#cbar.set_label('$|\psi|^2$ $[e/a_0^2]$')
#elif kind == 'i':
# cbar = fig.colorbar(cax, format='%.2f')
# cbar.set_label('z [$\AA$]')
outfile = '{f}.plane{i}.png'.format(f=args.cube, i=args.index)
print("Plotting into {}".format(outfile))
plt.savefig(outfile, dpi=200)
| mit |
phobson/statsmodels | examples/incomplete/dates.py | 29 | 1251 | """
Using dates with timeseries models
"""
import statsmodels.api as sm
import pandas as pd
# Getting started
# ---------------
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
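# (Per the note above, `dates` is expected to be a list of year-end datetimes,
# roughly datetime(1700, 12, 31), datetime(1701, 12, 31), ..., one per observation.)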
# Using Pandas
# ------------
# Make a pandas TimeSeries or DataFrame
endog = pd.TimeSeries(data.endog, index=dates)
# and instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Let's do some out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# Using explicit dates
# --------------------
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information
# attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# This attribute only exists if predict has been called. It holds the dates
# associated with the last call to predict.
#..TODO: should this be attached to the results instance?
| bsd-3-clause |
ucsd-ccbb/jupyter-genomics | src/dnaSeq/VAPr/variantannotation/annovar_processing.py | 1 | 3937 | import pandas
from variantannotation import genotype_calling
from variantannotation import utilities
#REFACTOR THIS CODEEEEE AASAP
def get_list_from_annovar_csv(df, chunk_ids):
df = df.rename(columns={'1000g2015aug_all': 'ThousandGenomeAll'})
df.Chr = df.Chr.replace(to_replace='chrM', value='chrMT')
df['Start'] = pandas.to_numeric(df['Start'])
df['End'] = pandas.to_numeric(df['End'])
print 'Converting columns to float ...'
df["nci60"] = utilities.to_float(df, "nci60")
df["ThousandGenomeAll"] = utilities.to_float(df, "ThousandGenomeAll")
df["ESP6500si_ALL"] = utilities.to_float(df, "ESP6500si_ALL")
print 'Processing knownGene info ...'
utilities.split_string(df, "Func.knownGene")
utilities.split_string(df, "ExonicFunc.knownGene")
print 'Processing tfbsConsSites info ...'
df["tfbsConsSites"] = df["tfbsConsSites"].dropna().apply(utilities.cell_to_dict)
#print 'Processing targetScanS info ...'
#df["targetScanS"] = df["targetScanS"].dropna().apply(utilities.cell_to_dict)
print 'Processing genomicSuperDups info ...'
df["genomicSuperDups"] = df["genomicSuperDups"].dropna().apply(utilities.cell_to_dict)
print 'Processing cytoBand info ...'
df["cytoBand"] = df["cytoBand"].dropna().apply(utilities.split_cytoband)
df["cytoBand"] = df["cytoBand"].dropna().apply(utilities.lists_to_dict)
print 'Creating hgvs key ...'
df['hgvs_key'] = pandas.Series(chunk_ids)
print 'Processing genotype call info ...'
my_sample_id = df["Otherinfo"].dropna().apply(genotype_calling.split_sample_ID)
genotype_call = my_sample_id.apply(lambda x: x[-3::])
dict_split = genotype_call.apply(genotype_calling.return_dict)
df['Otherinfo'] = dict_split
df = df.rename(columns={'Otherinfo': 'Genotype_Call'})
df = utilities.modify_df(df)
print 'Transforming to JSON from dataFrame'
#Clean up dataframe
df_final = df.where((pandas.notnull(df)), None)
list_dict = df_final.T.to_dict().values()
#Attempt to transform dataframe to dictionary
#Set the ID to be the HGVS_ID
print 'cleaning up...'
for i in range(0, len(list_dict)):
list_dict[i] = utilities.scrub_dict(list_dict[i])
#list_filtered = []
#for key in filtered.keys():
# list_filtered.append({key: filtered[key]})
print 'Done'
return list_dict
#REFACTOR THIS CODEEEEE AASAP
def get_df_from_annovar_csv(df, chunk_ids):
df = df.rename(columns={'1000g2015aug_all': 'ThousandGenomeAll'})
df.Chr = df.Chr.replace(to_replace='chrM', value='chrMT')
df['Start'] = pandas.to_numeric(df['Start'])
df['End'] = pandas.to_numeric(df['End'])
df["nci60"] = utilities.to_float(df, "nci60")
df["ThousandGenomeAll"] = utilities.to_float(df, "ThousandGenomeAll")
df["ESP6500si_ALL"] = utilities.to_float(df, "ESP6500si_ALL")
df["tfbsConsSites"] = df["tfbsConsSites"].dropna().apply(utilities.cell_to_dict)
utilities.split_string(df, "Func.knownGene")
utilities.split_string(df, "ExonicFunc.knownGene")
#df["targetScanS"] = df["targetScanS"].dropna().apply(utilities.cell_to_dict)
df["genomicSuperDups"] = df["genomicSuperDups"].dropna().apply(utilities.cell_to_dict)
df["cytoBand"] = df["cytoBand"].dropna().apply(utilities.split_cytoband)
df["cytoBand"] = df["cytoBand"].dropna().apply(utilities.lists_to_dict)
df['hgvs_key'] = pandas.Series(chunk_ids)
my_sample_id = df["Otherinfo"].dropna().apply(genotype_calling.split_sample_ID)
genotype_call = my_sample_id.apply(lambda x: x[-2::])
dict_split = genotype_call.apply(genotype_calling.return_dict)
df['Otherinfo'] = dict_split
df = df.rename(columns={'Otherinfo': 'Genotype_Call'})
#Clean up dataframe
df = utilities.modify_df(df)
df_final = df.where((pandas.notnull(df)), None)
return df_final
#after myvariant data has been obtained
#def join_df_chunks(df):
| mit |
ashokpant/rnnlib | utils/plot_errors.py | 7 | 3501 | #Copyright 2009,2010 Alex Graves
#
# This file is part of RNNLIB.
#
# RNNLIB is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RNNLIB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RNNLIB. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
from pylab import *
import re
from optparse import OptionParser
usage = "usage: %prog log_file"
parser = OptionParser(usage)
parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true", help="verbose plot (including error types with verboseChar)?")
parser.add_option("-c", "--verbosechar", dest="verboseChar", default="_", action="store", help="special character for verbose plots")
parser.add_option("-e", "--err-types", dest="errTypes", default="seqRmsError seqMixtureError crossEntropyError classificationError mixtureError sumSquaresError labelError ctcError mdlError", action="store", help="space separated list of error types to plot (empty list => plot all)")
(opt, args) = parser.parse_args()
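# Example invocation (hypothetical log path):
#   python plot_errors.py -v -e "ctcError labelError" training.log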
errors = dict()
#print opt
if len(args) != 1:
parser.error("incorrect number of arguments")
filename = args[0]
print "plotting errors from", filename
lines = file(filename, 'r').readlines()
errorType = ""
epochNum = -1
bestEpochs = dict()
allowedErrors = opt.errTypes.split()
for l in lines:
words = l.split()
if (l.find("epoch") >= 0 and l.find("took") >= 0):
epochNum = int(l.split()[1])
elif (epochNum >= 0):
if (l.find("train errors") >= 0):
errorType = "train"
elif (l.find("test errors") >= 0):
errorType = "test"
elif (l.find("validation errors") >= 0):
errorType = "validation"
elif (len(words) == 0 or l.find("best") >= 0):
errorType = ""
if l.find("best network (") >= 0:
bestEpochs[l.split()[2]] = epochNum
elif l.find("saving to") >= 0 and l.find(".best_") >= 0:
bestEpochs['(' + l.split('_')[-1].split('.')[0] + ')'] = epochNum
elif len(words) == 2 and errorType <> "":
errWord = words[0]
if (len(allowedErrors)==0 or errWord in allowedErrors) and (opt.verbose or opt.verboseChar not in errWord):
errVal = float(words[1].strip('%'))
if errWord not in errors:
errors[errWord] = dict()
if errorType in errors[words[0]]:
errors[errWord][errorType][0].append(epochNum)
errors[errWord][errorType][1].append(errVal)
else:
errors[errWord][errorType] = [[epochNum],[errVal]]
if (len(errors)):
for err in errors.items():
figure()
title(filename + ' \n' + err[0])
for dataSet in err[1].items():
plot(dataSet[1][0], dataSet[1][1], linewidth=1.5, label=dataSet[0], marker='+')
axes = gca()
yRange = [axis()[2], axis()[3]]
if len(bestEpochs) > 0:
bone()
for best in bestEpochs.items():
if re.search("\(.*\)", best[0]):
lab = "best "+best[0]
else:
lab = "best network"
plot([best[1], best[1]], yRange, linestyle ='--', linewidth=1, label=lab)
legend()
legend(prop = matplotlib.font_manager.FontProperties(size = 'smaller'))
else:
print "allowed errors:", allowedErrors
print "no allowed errors found, exiting"
show()
| gpl-3.0 |
evgchz/scikit-learn | examples/svm/plot_rbf_parameters.py | 26 | 4273 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters `gamma`
and `C` of the rbf kernel SVM.
Intuitively, the `gamma` parameter defines how far the influence
of a single training example reaches, with low values meaning 'far'
and high values meaning 'close'.
The `C` parameter trades off misclassification of training examples
against simplicity of the decision surface. A low C makes
the decision surface smooth, while a high C aims at classifying
all training examples correctly.
Two plots are generated. The first is a visualization of the
decision function for a variety of parameter values, and the second
is a heatmap of the classifier's cross-validation accuracy as
a function of `C` and `gamma`. For this example we explore a relatively
large grid for illustration purposes. In practice, a logarithmic
grid from `10**-3` to `10**3` is usually sufficient.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
Y = iris.target
# dataset for decision function visualization
X_2d = X[:, :2]
X_2d = X_2d[Y > 0]
Y_2d = Y[Y > 0]
Y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifier
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = 10.0 ** np.arange(-2, 9)
gamma_range = 10.0 ** np.arange(-5, 4)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedKFold(y=Y, n_folds=3)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, Y)
print("The best classifier is: ", grid.best_estimator_)
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1, 1e2, 1e4]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, Y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-5, 5, 200), np.linspace(-5, 5, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma 10^%d, C 10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.jet)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=Y_2d, cmap=plt.cm.jet)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
score_dict = grid.grid_scores_
# We extract just the scores
scores = [x[1] for x in score_dict]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# draw heatmap of accuracy as a function of gamma and C
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=0.05, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.spectral)
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.show()
| bsd-3-clause |
ZlataNaumova/vis101-hw-naumova-zlata | research/test3/main.py | 1 | 21014 | from builtins import *
__author__ = 'zlata'
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
import openpyxl
from sklearn.metrics import r2_score
#import ml_metrics as metrics
from scipy.interpolate import interp1d #for interpolation
#for P
#wb = openpyxl.load_workbook(filename = 'error_wind_test_P.xlsx')
#for U
#wb = openpyxl.load_workbook(filename = 'error_wind_test_U.xlsx')
#for V
wb = openpyxl.load_workbook(filename = 'error_wind_test_V.xlsx')
sheet = wb["1"]
sheet2 = wb["2"]
sheet3 = wb["3"]
sheet4 = wb["4"]
path = "D:/Buchan/regressiya/pres/FORCE/B1_interp/2013090100.txt"
files3 = os.listdir("D:/Buchan/regressiya/test/pres/FORCE/B1_interp/")
norma = 6
wind_read_x_list=[6,10,20,30,60]
#
wind_train_list = [60,120,180,240]
wind_train_list_y=[24,48,96,180]
#
wind_data_join_y=6
h = 50
for w in range(len(wind_read_x_list)):
    # standard deviation (SKO)
SKO_Simple=[]
SKO_Ridge=[]
SKO_Lasso=[]
    # mean value (mean error)
SO_Simple=[]
SO_Ridge=[]
SO_Lasso=[]
SAO_Simple=[]
SAO_Ridge=[]
SAO_Lasso=[]
    # R2 - coefficient of determination
R2_Simple=[]
R2_Ridge=[]
R2_Lasso=[]
wind_read = wind_read_x_list[w]
delta =wind_read-norma
start_pr = 1920 #min start_pr =1860
#for P
#minVal = 990
#maxVal = 1050
#for U
#minVal = -40
#maxVal = 40
#for V
minVal = -27
maxVal = 27
start_pr_for_y =start_pr-delta*(start_pr/wind_read)
wind_data_join_x = wind_data_join_y + delta
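    # Worked example of the window bookkeeping above: for wind_read=10,
    # delta = 10-6 = 4, start_pr_for_y = 1920 - 4*(1920/10) = 1152 and
    # wind_data_join_x = 6+4 = 10; for wind_read=6 (the base resolution)
    # delta = 0, so start_pr_for_y stays 1920 and wind_data_join_x stays 6.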
matrix_simple_coeficent = np.zeros((len(wind_train_list)*h,460))
matrix_ridge_coeficent = np.zeros((len(wind_train_list)*h,460))
matrix_lasso_coeficent = np.zeros((len(wind_train_list)*h,460))
for z in range(len(wind_train_list)):
wind_train_y=wind_train_list_y[z]
wind_train=(wind_train_y*wind_read)/norma
matrix_pr_simple = np.zeros((h, 60))
matrix_pr_ridge = np.zeros((h, 60))
matrix_pr_lasso = np.zeros((h, 60))
matrix_Y = np.zeros((h, 60))
matrix_Hirlam = np.zeros((h, 60))
matrix_Force = np.zeros((h, 60))
matrix_GFS = np.zeros((h, 60))
SKO_Simple_temp=[]
SKO_Ridge_temp=[]
SKO_Lasso_temp=[]
SO_Simple_temp=[]
SO_Ridge_temp=[]
SO_Lasso_temp=[]
SAO_Simple_temp=[]
SAO_Ridge_temp=[]
SAO_Lasso_temp=[]
R2_Simple_temp=[]
R2_Ridge_temp=[]
R2_Lasso_temp=[]
for r in range(int(60/wind_read)): # have to be 60/wind_read
data_0_6 = np.loadtxt(path)[:0, 1:2]
begin = r * wind_read
begin_coeff=r*45
if r!=0:
begin_coeff+=1
begin_y = r*norma
finish = begin + wind_read
for f in files3:
path1 = str("D:/Buchan/regressiya/test/pres/FORCE/B1_interp/" + f)
data_0_6 = np.append(data_0_6, np.loadtxt(path1)[:0, 1:2])
path_f = []
for d, dirs, files in os.walk("D:/Buchan/regressiya/test/"):
for f in files:
path = os.path.join(d, f)
path_f.append(path)
for i in range(len(path_f)):
data_test = np.loadtxt(path)[:0, 1:2]
data_test = np.append(data_test, np.loadtxt(path_f[i])[begin:finish, 1:2])
data_0_6 = np.hstack((data_0_6, data_test))
            c = np.reshape(data_0_6, (45, len(data_0_6) // 45))  # integer division keeps the shape an int on Python 3
c = np.transpose(c)
#for P
#y_B1 = np.loadtxt("D:/Buchan/regressiya/prog/Preasure/B1_restored_Pressure_gridded1H.txt")[begin:, 1:2]
#for U
#y_B1 = np.loadtxt("D:/Buchan/regressiya/prog/U/B1_restored_Wind_u_gridded1H.txt")[begin:, 1:2]
#for V
y_B1 = np.loadtxt("D:/Buchan/regressiya/prog/V/B1_restored_v_gridded1H.txt")[begin:, 1:2]
for b in range(h):
temp_result_simple = []
temp_result_Ridge = []
temp_result_Lasso = []
temp_result_simple_coef = []
temp_result_Ridge_coef = []
temp_result_Lasso_coef = []
arr = np.zeros((wind_train, 45))
arr_pr = np.zeros((wind_read, 45))
arr[:, 0] = 1
arr_pr[:, 0] = 1
df = pd.DataFrame(arr)
df_arr = pd.DataFrame(arr_pr)
test_Y = y_B1[start_pr_for_y + (b * wind_data_join_y):start_pr_for_y + (b * wind_data_join_y) + wind_read, :1]
                train_Y = y_B1[:0,:]  # create an empty matrix with the right dimensions
for k in range(int(wind_train/wind_read)):
temp_dem=start_pr_for_y-wind_train_y+(b*wind_data_join_y)-begin_y+k*(wind_read-delta)
temp_train_Y=y_B1[temp_dem:temp_dem+wind_read,:]
train_Y=np.vstack((train_Y,temp_train_Y))
for l in range(wind_read):
matrix_Y[b, begin + l] = test_Y[l]
temp_train_X_i = c[start_pr - wind_train + (b * wind_data_join_x) - begin:start_pr + (b * wind_data_join_x) - begin, :]
temp_test_X_i = c[start_pr + (b * wind_data_join_x):start_pr + (b * wind_data_join_x) + wind_read, :]
temp_E = np.array(df[0])
temp_E_pr = np.array(df_arr[0])
test_X_i = np.column_stack((temp_E_pr, temp_test_X_i))
train_X_i = np.column_stack((temp_E, temp_train_X_i))
regr_simple_i = linear_model.LinearRegression()
regr_simple_i.fit(train_X_i, train_Y)
regr_Ridge_i = linear_model.RidgeCV()
regr_Ridge_i.fit(train_X_i, train_Y)
regr_Lasso_i = linear_model.LassoCV()
regr_Lasso_i.fit(train_X_i, train_Y)
temp_result_simple.append(regr_simple_i.predict(test_X_i))
temp_result_Ridge.append(regr_Ridge_i.predict(test_X_i))
temp_result_Lasso.append(regr_Lasso_i.predict(test_X_i))
temp_result_simple_coef.append(regr_simple_i.coef_)
temp_result_Ridge_coef.append(regr_Ridge_i.coef_)
temp_result_Lasso_coef.append(regr_Lasso_i.coef_)
print("b: "+str(b)+" r: "+str(r)+" z: "+str(z)+" w: "+str(w))
for i in range(len(test_X_i)):
# for P
#matrix_Hirlam[b, begin + i] = test_X_i[i][1] / 100
#matrix_Force[b, begin + i] = test_X_i[i][6] / 100
#matrix_GFS[b, begin + i] = test_X_i[i][11] / 100
#for U
#matrix_Hirlam[b, begin + i] = test_X_i[i][16]
#matrix_Force[b, begin + i] = test_X_i[i][21]
#matrix_GFS[b, begin + i] = test_X_i[i][26]
# for V
matrix_Hirlam[b, begin + i] = test_X_i[i][31]
matrix_Force[b, begin + i] = test_X_i[i][36]
matrix_GFS[b, begin + i] = test_X_i[i][41]
for i in range(len(temp_result_simple)):
for j in range(len(temp_result_simple[0])):
matrix_pr_simple[b, begin + j] = temp_result_simple[i][j]
matrix_pr_ridge[b, begin + j] = temp_result_Ridge[i][j]
matrix_pr_lasso[b, begin + j] = temp_result_Lasso[i][j]
#b+w*h*(len(wind_train_list))+z*h,begin+j] = temp_result_simple_coef[i][j]
for j in range(46):
matrix_simple_coeficent[b+z*h, begin_coeff + j] = temp_result_simple_coef[0][0][j]
matrix_lasso_coeficent[b+z*h, begin_coeff + j] = temp_result_Lasso_coef[0][j]
matrix_ridge_coeficent[b+z*h, begin_coeff + j] = temp_result_Ridge_coef[0][0][j]
#print(matrix_simple_coeficent)
a=int(60/wind_read)
SKO_Simple_temp_wind=np.zeros((1,a))
SKO_Ridge_temp_wind=np.zeros((1,a))
SKO_Lasso_temp_wind=np.zeros((1,a))
SO_Simple_temp_wind=np.zeros((1,a))
SO_Ridge_temp_wind=np.zeros((1,a))
SO_Lasso_temp_wind=np.zeros((1,a))
SAO_Simple_temp_wind=np.zeros((1,a))
SAO_Ridge_temp_wind=np.zeros((1,a))
SAO_Lasso_temp_wind=np.zeros((1,a))
R2_Simple_temp_wind=np.zeros((1,a))
R2_Ridge_temp_wind=np.zeros((1,a))
R2_Lasso_temp_wind=np.zeros((1,a))
#g=len(matrix_pr_simple)
for i in range(h):
#for error in window prediction
for y in range(a):
result_simple_wind = []
result_Ridge_wind = []
result_Lasso_wind = []
result_Hirlam_wind = []
result_Force_wind = []
result_GFS_wind=[]
test_Y_i_wind = []
result_simple_wind = matrix_pr_simple[i, y*wind_read:(y+1)*wind_read]
result_Ridge_wind = matrix_pr_ridge[i, y*wind_read:(y+1)*wind_read]
result_Lasso_wind = matrix_pr_lasso[i, y*wind_read:(y+1)*wind_read]
result_Hirlam_wind = matrix_Hirlam[i, y*wind_read:(y+1)*wind_read]
result_Force_wind = matrix_Force[i, y*wind_read:(y+1)*wind_read]
result_GFS_wind = matrix_GFS[i, y*wind_read:(y+1)*wind_read]
test_Y_i_wind = matrix_Y[i, y*wind_read:(y+1)*wind_read]
R2_Simple_temp_wind[0,y]+=(r2_score(test_Y_i_wind,result_simple_wind))
R2_Ridge_temp_wind[0,y]+=(r2_score(test_Y_i_wind, result_Ridge_wind))
R2_Lasso_temp_wind[0,y]+=(r2_score(test_Y_i_wind,result_Lasso_wind))
SO_Simple_temp_wind[0,y]+=(np.mean(result_simple_wind-test_Y_i_wind))
SO_Lasso_temp_wind[0,y]+=(np.mean(result_Lasso_wind-test_Y_i_wind))
SO_Ridge_temp_wind[0,y]+=(np.mean(result_Ridge_wind-test_Y_i_wind))
SKO_Simple_temp_wind[0,y]+=(np.std(result_simple_wind))
SKO_Ridge_temp_wind[0,y]+=(np.std(result_Ridge_wind))
SKO_Lasso_temp_wind[0,y]+=(np.std(result_Lasso_wind))
SAO_Simple_temp_wind[0,y]+=(np.mean(np.abs(result_simple_wind-test_Y_i_wind)))
SAO_Lasso_temp_wind[0,y]+=(np.mean(np.abs(result_Lasso_wind-test_Y_i_wind)))
SAO_Ridge_temp_wind[0,y]+=(np.mean(np.abs(result_Ridge_wind-test_Y_i_wind)))
result_simple = []
result_Ridge = []
result_Lasso = []
result_Hirlam = []
result_Force = []
result_GFS = []
test_Y_i = []
#for all prediction
result_simple = matrix_pr_simple[i, :60]
result_Ridge = matrix_pr_ridge[i, :60]
result_Lasso = matrix_pr_lasso[i, :60]
result_Hirlam = matrix_Hirlam[i, :60]
result_Force = matrix_Force[i, :60]
result_GFS = matrix_GFS[i, :60]
test_Y_i = matrix_Y[i, :60]
R2_Simple_temp.append(r2_score(test_Y_i,result_simple))
R2_Ridge_temp.append(r2_score(test_Y_i, result_Ridge))
R2_Lasso_temp.append(r2_score(test_Y_i,result_Lasso))
SO_Simple_temp.append(np.mean(result_simple-test_Y_i))
SO_Lasso_temp.append(np.mean(result_Lasso-test_Y_i))
SO_Ridge_temp.append(np.mean(result_Ridge-test_Y_i))
SKO_Simple_temp.append(np.std(result_simple))
SKO_Ridge_temp.append(np.std(result_Ridge))
SKO_Lasso_temp.append(np.std(result_Lasso))
SAO_Simple_temp.append(np.mean(np.abs(result_simple-test_Y_i)))
SAO_Lasso_temp.append(np.mean(np.abs(result_Lasso-test_Y_i)))
SAO_Ridge_temp.append(np.mean(np.abs(result_Ridge-test_Y_i)))
plt.plot(result_simple, color='blue', linewidth=2, label="simple")
plt.plot(result_Ridge, color='brown', linewidth=2, label="Ridge")
plt.plot(result_Lasso, color='yellow', linewidth=2, label="Lasso")
plt.plot(result_Hirlam, color='green', linewidth=2, label="Hirlam")
plt.plot(result_GFS, color='silver', linewidth=2, label="GFS")
plt.plot(result_Force, color='black', linewidth=2, label="Forse")
plt.plot(test_Y_i, color='red', linewidth=2, label="data")
plt.grid()
plt.legend(loc=0)
plt.ylim([minVal,maxVal])
#plt.show()
# i there == h
# for P
#plt.savefig("./graphs_p/wind_read" +str(w)+"/wind_train"+str(z)+"/"+ str(i) + ".png")
# for U
#plt.savefig("./graphs_u/wind_read" +str(w)+"/wind_train"+str(z)+"/"+ str(i) + ".png")
# for V
plt.savefig("./graphs_v/wind_read" +str(w)+"/wind_train"+str(z)+"/"+ str(i) + ".png")
plt.close()
#for window prediction
for y in range(a):
R2_Simple_temp_wind[0,y]=R2_Simple_temp_wind[0,y]/h
R2_Ridge_temp_wind[0,y]=R2_Ridge_temp_wind[0,y]/h
R2_Lasso_temp_wind[0,y]=R2_Lasso_temp_wind[0,y]/h
SO_Simple_temp_wind[0,y]=SO_Simple_temp_wind[0,y]/h
SO_Lasso_temp_wind[0,y]=SO_Lasso_temp_wind[0,y]/h
SO_Ridge_temp_wind[0,y]=SO_Ridge_temp_wind[0,y]/h
SKO_Simple_temp_wind[0,y]=SKO_Simple_temp_wind[0,y]/h
SKO_Ridge_temp_wind[0,y]=SKO_Ridge_temp_wind[0,y]/h
SKO_Lasso_temp_wind[0,y]=SKO_Lasso_temp_wind[0,y]/h
SAO_Simple_temp_wind[0,y]=SAO_Simple_temp_wind[0,y]/h
SAO_Lasso_temp_wind[0,y]=SAO_Lasso_temp_wind[0,y]/h
SAO_Ridge_temp_wind[0,y]=SAO_Ridge_temp_wind[0,y]/h
        #for U
        #k = open('./graphs_u/wind_read'+str(w)+"/wind_train"+str(z)+'/'+ 'error.txt','w')
        #for V
        k = open('./graphs_v/wind_read'+str(w)+"/wind_train"+str(z)+'/'+ 'error.txt','w')
        #for Pressure
        #k = open('./graphs_p/wind_read'+str(w)+"/wind_train"+str(z)+'/'+ 'error.txt','w')
k.write("R2 Simple. wind reading"+str(wind_read)+"\n")
for i in range(a):
k.write(str(R2_Simple_temp_wind[0,i]) +'\t')
k.write('\n')
k.write("\n"+"R2 Ridge. wind reading"+str(wind_read)+"\n")
for i in range(a):
k.write(str(R2_Ridge_temp_wind[0,i]) +'\t')
k.write('\n')
k.write("\n"+"R2 Lasso. wind reading"+str(wind_read)+"\n")
for i in range(a):
k.write(str(R2_Lasso_temp_wind[0,i]) +'\t')
k.write('\n')
k.write("\n"+"SKO Simple. wind reading"+str(wind_read)+"\n")
for i in range(a):
k.write(str(SKO_Simple_temp_wind[0,i]) +'\t')
k.write('\n')
k.write("\n"+"SKO Ridge. wind reading"+str(wind_read)+"\n")
for i in range(a):
k.write(str(SKO_Ridge_temp_wind[0,i]) +'\t')
k.write('\n')
k.write("\n"+"SKO Lasso. wind reading"+str(wind_read)+"\n")
for i in range(a):
k.write(str(SKO_Lasso_temp_wind[0,i]) +'\t')
k.write('\n')
k.write("\n"+"SO Simple. wind reading"+str(wind_read)+"\n")
for i in range(a):
k.write(str(SO_Simple_temp_wind[0,i]) +'\t')
k.write('\n')
k.write("\n"+"SO Ridge. wind reading"+str(wind_read)+"\n")
for i in range(a):
k.write(str(SO_Ridge_temp_wind[0,i]) +'\t')
k.write('\n')
k.write("\n"+"SO Lasso. wind reading"+str(wind_read)+"\n")
for i in range(a):
k.write(str(SO_Lasso_temp_wind[0,i]) +'\t')
k.write('\n')
k.close()
count=1+w*len(wind_train_list)*3+z*3
for q in range(a):
sheet.cell(row=count,column=q+1).value=str(SKO_Simple_temp_wind[0,q])
sheet.cell(row=count+1,column=q+1).value=str(SKO_Ridge_temp_wind[0,q])
sheet.cell(row=count+2,column=q+1).value=str(SKO_Lasso_temp_wind[0,q])
sheet2.cell(row=count,column=q+1).value=str(SO_Simple_temp_wind[0,q])
sheet2.cell(row=count+1,column=q+1).value=str(SO_Ridge_temp_wind[0,q])
sheet2.cell(row=count+2,column=q+1).value=str(SO_Lasso_temp_wind[0,q])
sheet3.cell(row=count,column=q+1).value=str(R2_Simple_temp_wind[0,q])
sheet3.cell(row=count+1,column=q+1).value=str(R2_Ridge_temp_wind[0,q])
sheet3.cell(row=count+2,column=q+1).value=str(R2_Lasso_temp_wind[0,q])
sheet4.cell(row=count,column=q+1).value=str(SAO_Simple_temp_wind[0,q])
sheet4.cell(row=count+1,column=q+1).value=str(SAO_Ridge_temp_wind[0,q])
sheet4.cell(row=count+2,column=q+1).value=str(SAO_Lasso_temp_wind[0,q])
#for all prediction
R2_S=0
R2_R=0
R2_L=0
SO_S=0
SO_L=0
SO_R=0
SAO_S=0
SAO_L=0
SAO_R=0
SKO_S=0
SKO_L=0
SKO_R=0
for i in range(len(R2_Simple_temp)):
R2_S += R2_Simple_temp[i]
R2_R += R2_Ridge_temp[i]
R2_L += R2_Lasso_temp[i]
SO_S += SO_Simple_temp[i]
SO_R += SO_Ridge_temp[i]
SO_L += SO_Lasso_temp[i]
SAO_S += SAO_Simple_temp[i]
SAO_R += SAO_Ridge_temp[i]
SAO_L += SAO_Lasso_temp[i]
SKO_S += SKO_Simple_temp[i]
SKO_R += SKO_Ridge_temp[i]
SKO_L += SKO_Lasso_temp[i]
R2_S=R2_S/h
R2_L=R2_L/h
R2_R=R2_R/h
SO_S=SO_S/h
SO_R=SO_R/h
SO_L=SO_L/h
SAO_S=SAO_S/h
SAO_R=SAO_R/h
SAO_L=SAO_L/h
SKO_S=SKO_S/h
SKO_R=SKO_R/h
SKO_L=SKO_L/h
R2_Simple.append(R2_S)
R2_Lasso.append(R2_L)
R2_Ridge.append(R2_R)
SO_Simple.append(SO_S)
SO_Ridge.append(SO_R)
SO_Lasso.append(SO_L)
SAO_Simple.append(SAO_S)
SAO_Ridge.append(SAO_R)
SAO_Lasso.append(SAO_L)
SKO_Simple.append(SKO_S)
SKO_Ridge.append(SKO_R)
SKO_Lasso.append(SKO_L)
#for P
#f = open('./graphs_p/wind_read'+str(w)+"/"+ 'error.txt','w')
#for U
#f = open('./graphs_u/wind_read'+str(w)+"/"+ 'error.txt','w')
#for V
f = open('./graphs_v/wind_read'+str(w)+"/"+ 'error.txt','w')
f.write("R2 Simple"+"\n")
for i in range(len(wind_train_list)):
f.write(str(R2_Simple[i]) +'\t')
f.write('\n')
f.write("\n"+"R2 Ridge"+"\n")
for i in range(len(wind_train_list)):
f.write(str(R2_Ridge[i]) +'\t')
f.write('\n')
f.write("\n"+"R2 Lasso"+"\n")
for i in range(len(wind_train_list)):
f.write(str(R2_Lasso[i]) +'\t')
f.write('\n')
f.write("\n"+"SO Simple"+"\n")
for i in range(len(wind_train_list)):
f.write(str(SO_Simple[i]) +'\t')
f.write('\n')
f.write("\n"+"SO Ridge"+"\n")
for i in range(len(wind_train_list)):
f.write(str(SO_Ridge[i]) +'\t')
f.write('\n')
f.write("\n"+"SO Lasso"+"\n")
for i in range(len(wind_train_list)):
f.write(str(SO_Lasso[i]) +'\t')
f.write('\n')
f.write("\n"+"SKO Simple"+"\n")
for i in range(len(wind_train_list)):
f.write(str(SKO_Simple[i]) +'\t')
f.write('\n')
f.write("\n"+"SKO Ridge"+"\n")
for i in range(len(wind_train_list)):
f.write(str(SKO_Ridge[i]) +'\t')
f.write('\n')
f.write("\n"+"SKO Lasso"+"\n")
for i in range(len(wind_train_list)):
f.write(str(SKO_Lasso[i]) +'\t')
f.write("\n"+"SAO Simple"+"\n")
for i in range(len(wind_train_list)):
f.write(str(SAO_Simple[i]) +'\t')
f.write('\n')
f.write("\n"+"SAO Ridge"+"\n")
for i in range(len(wind_train_list)):
f.write(str(SAO_Ridge[i]) +'\t')
f.write('\n')
f.write("\n"+"SAO Lasso"+"\n")
for i in range(len(wind_train_list)):
f.write(str(SAO_Lasso[i]) +'\t')
f.write('\n')
f.close()
#for P
#l = open('graphs_p/coeff_simple'+str(w)+".txt",'w')
#l1 = open('graphs_p/coeff_ridge'+str(w)+".txt",'w')
#l2 = open('graphs_p/coeff_lasso'+str(w)+".txt",'w')
#for U
#l = open('graphs_u/coeff_simple'+str(w)+".txt",'w')
#l1 = open('graphs_u/coeff_ridge'+str(w)+".txt",'w')
#l2 = open('graphs_u/coeff_lasso'+str(w)+".txt",'w')
#for V
l = open('graphs_v/coeff_simple'+str(w)+".txt",'w')
l1 = open('graphs_v/coeff_ridge'+str(w)+".txt",'w')
l2 = open('graphs_v/coeff_lasso'+str(w)+".txt",'w')
for i in range(h*len(wind_train_list)):
for j in range(460):
l.write(str(matrix_simple_coeficent[i,j])+"\t")
l1.write(str(matrix_ridge_coeficent[i,j])+"\t")
l2.write(str(matrix_lasso_coeficent[i,j])+"\t")
l.write("\n")
l1.write("\n")
l2.write("\n")
l.close()
l1.close()
l2.close()
#for P
#wb.save(filename='error_wind_test_P.xlsx')
#for U
#wb.save(filename='error_wind_test_U.xlsx')
#for V
wb.save(filename='error_wind_test_V.xlsx')
| mit |
nicholasmalaya/arcanus | uq/ps3/hpd.py | 2 | 1394 | #
#
#
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.integrate import simps
from copy import copy
def hpd(x, pdf, obs):
""" Compute credibility interval (beta) """
assert (abs(simps(pdf,x) - 1.0) < 1e-12) , 'hpd(x, pdf, obs): pdf must integrate to 1'
assert (x[0] < obs) and (obs < x[-1]), 'hpd(x, pdf, obs): obs must be contained in x'
pobs = np.interp(obs, x, pdf)
pdfc = copy(pdf)
pdfc[np.where(pdfc < pobs)] = 0
# For debugging only:
# plt.figure()
# plt.plot(x, pdfc, 'o-')
# plt.title('in hpd()')
# plt.show()
beta = simps(pdfc, x)
return beta
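# Sketch of the idea: beta is the probability mass of the region where the pdf
# exceeds pdf(obs), so an observation at the mode gives beta near 0 and an
# observation far out in a tail gives beta near 1.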
def plotpdfandobs(x, pdf, obs, filename=None, beta=[]):
""" Plot pdf along with the given observation """
maxval = round(max(pdf)*10.+1)/10.
plt.figure()
plt.plot(x, pdf, 'b', label='pdf')
plt.plot([obs, obs], [0.0, maxval], 'k--', label='obs')
if beta:
xt = x[len(x)/6]
yt = 8.*maxval/10.
plt.text(xt, yt, 'beta = ' + str(beta))
plt.legend()
if filename == None: plt.show()
else: plt.savefig(filename + '.eps', format='eps', dpi=300)
#
# main function
#
if __name__ == '__main__':
mean = 0
variance = 1
sigma = np.sqrt(variance)
x = np.linspace(-3,3,100)
pdf = mlab.normpdf(x,-1,0.5) + mlab.normpdf(x, 1, 0.5)
pdf = pdf / simps(pdf, x)
# print pdf
obs = 0.2
beta = hpd(x, pdf, obs)
plotpdfandobs(x, pdf, obs, 'test', beta)
print beta
| mit |
kashif/scikit-learn | examples/applications/plot_out_of_core_classification.py | 32 | 13829 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
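# ---------------------------------------------------------------------------
# Minimal sketch of the out-of-core pattern described in the module docstring
# above. The toy strings and labels here are invented for illustration only;
# they are not part of the Reuters example itself.
# ---------------------------------------------------------------------------
_sketch_vectorizer = HashingVectorizer(n_features=2 ** 10)
_sketch_clf = SGDClassifier()
for _texts, _labels in [(["cheap oil", "shares rise"], [0, 1]),
                        (["merger talks", "crude falls"], [1, 0])]:
    # Every mini-batch is hashed into the same fixed-size feature space,
    # so partial_fit can be called repeatedly without fitting a vocabulary.
    _sketch_clf.partial_fit(_sketch_vectorizer.transform(_texts), _labels,
                            classes=np.array([0, 1]))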
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
# Discard test set
get_minibatch(data_stream, n_test_documents)
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create an iterator that yields mini-batches of documents drawn from the
# data_stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
RobertABT/heightmap | build/matplotlib/examples/animation/old_animation/animate_decay_tk_blit.py | 3 | 1342 | from __future__ import print_function
import time, sys
import numpy as np
import matplotlib.pyplot as plt
def data_gen():
t = data_gen.t
data_gen.t += 0.05
return np.sin(2*np.pi*t) * np.exp(-t/10.)
data_gen.t = 0
fig, ax = plt.subplots()
line, = ax.plot([], [], animated=True, lw=2)
ax.set_ylim(-1.1, 1.1)
ax.set_xlim(0, 5)
ax.grid()
xdata, ydata = [], []
def run(*args):
background = fig.canvas.copy_from_bbox(ax.bbox)
# for profiling
tstart = time.time()
while 1:
# restore the clean slate background
fig.canvas.restore_region(background)
# update the data
t = data_gen.t
y = data_gen()
xdata.append(t)
ydata.append(y)
xmin, xmax = ax.get_xlim()
if t>=xmax:
ax.set_xlim(xmin, 2*xmax)
fig.canvas.draw()
background = fig.canvas.copy_from_bbox(ax.bbox)
line.set_data(xdata, ydata)
# just draw the animated artist
ax.draw_artist(line)
# just redraw the axes rectangle
fig.canvas.blit(ax.bbox)
if run.cnt==1000:
# print the timing info and quit
print('FPS:' , 1000/(time.time()-tstart))
sys.exit()
run.cnt += 1
run.cnt = 0
manager = plt.get_current_fig_manager()
manager.window.after(100, run)
plt.show()
| mit |
noelevans/sandpit | bayesian_methods_for_hackers/autocorrelation_ch03.py | 1 | 1070 | from matplotlib import pyplot as plt
import numpy as np
import pymc as pm
def autocorr(x):
result = np.correlate(x, x, mode='full')
result = result / np.max(result)
return result[result.size / 2:]
def main():
x_t = pm.rnormal(0, 1, 200)
x_t[0] = 0
y_t = np.zeros(200)
for i in range(1, 200):
y_t[i] = pm.rnormal(y_t[i - 1], 1)
plt.plot(y_t, label="$y_t$", lw=3)
plt.plot(x_t, label="$x_t$", lw=3)
plt.xlabel("time, $t$")
plt.legend()
plt.show()
colors = ["#348ABD", "#A60628", "#7A68A6"]
x = np.arange(1, 200)
plt.bar(x, autocorr(y_t)[1:], width=1, label="$y_t$",
edgecolor=colors[0], color=colors[0])
plt.bar(x, autocorr(x_t)[1:], width=1, label="$x_t$",
color=colors[1], edgecolor=colors[1])
plt.legend(title="Autocorrelation")
plt.ylabel("measured correlation \nbetween $y_t$ and $y_{t-k}$.")
plt.xlabel("k (lag)")
plt.title("Autocorrelation plot of $y_t$ and $x_t$ for differing $k$ lags.")
plt.show()
if __name__ == '__main__':
main()
| mit |
ilyes14/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
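# Minimal sketch of the mechanism this example visualizes (the toy points and
# weights below are invented for illustration; they are not the plotted data):
# passing sample_weight to fit() rescales C per sample, so heavily weighted
# points become more expensive to misclassify.
_demo_X = np.array([[0., 0.], [1., 1.], [0., 1.], [1., 0.]])
_demo_y = np.array([0, 0, 1, 1])
_demo_weights = np.array([1., 1., 1., 10.])  # emphasize the last point
_demo_clf = svm.SVC().fit(_demo_X, _demo_y, sample_weight=_demo_weights)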
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 25 | 25114 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet, notice that glmnet divides by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_) -
np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
# to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
# Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=50, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=10, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 10, 3), clf.mse_path_.shape)
assert_equal((2, 10), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=10, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((10, 3), clf.mse_path_.shape)
assert_equal(10, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same input for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong dtype,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in an
# incorrect computation
X = check_array(X, order='C', dtype='float64')
assert_raises(ValueError, clf.fit, X, y, check_input=False)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
def test_lasso_non_float_y():
X = [[0, 0], [1, 1], [-1, -1]]
y = [0, 1, 2]
y_float = [0.0, 1.0, 2.0]
for model in [ElasticNet, Lasso]:
clf = model(fit_intercept=False)
clf.fit(X, y)
clf_float = model(fit_intercept=False)
clf_float.fit(X, y_float)
assert_array_equal(clf.coef_, clf_float.coef_)
| bsd-3-clause |
DGrady/pandas | asv_bench/benchmarks/indexing.py | 6 | 8132 | from .pandas_vb_common import *
class Int64Indexing(object):
goal_time = 0.2
def setup(self):
self.s = Series(np.random.rand(1000000))
def time_getitem_scalar(self):
self.s[800000]
def time_getitem_slice(self):
self.s[:800000]
def time_getitem_list_like(self):
self.s[[800000]]
def time_getitem_array(self):
self.s[np.arange(10000)]
def time_getitem_lists(self):
self.s[np.arange(10000).tolist()]
def time_iloc_array(self):
self.s.iloc[np.arange(10000)]
def time_iloc_list_like(self):
self.s.iloc[[800000]]
def time_iloc_scalar(self):
self.s.iloc[800000]
def time_iloc_slice(self):
self.s.iloc[:800000]
def time_ix_array(self):
self.s.ix[np.arange(10000)]
def time_ix_list_like(self):
self.s.ix[[800000]]
def time_ix_scalar(self):
self.s.ix[800000]
def time_ix_slice(self):
self.s.ix[:800000]
def time_loc_array(self):
self.s.loc[np.arange(10000)]
def time_loc_list_like(self):
self.s.loc[[800000]]
def time_loc_scalar(self):
self.s.loc[800000]
def time_loc_slice(self):
self.s.loc[:800000]
class StringIndexing(object):
goal_time = 0.2
def setup(self):
self.index = tm.makeStringIndex(1000000)
self.s = Series(np.random.rand(1000000), index=self.index)
self.lbl = self.s.index[800000]
def time_getitem_label_slice(self):
self.s[:self.lbl]
def time_getitem_pos_slice(self):
self.s[:800000]
def time_get_value(self):
self.s.get_value(self.lbl)
class DatetimeIndexing(object):
goal_time = 0.2
def setup(self):
tm.N = 1000
self.ts = tm.makeTimeSeries()
self.dt = self.ts.index[500]
def time_getitem_scalar(self):
self.ts[self.dt]
class DataFrameIndexing(object):
goal_time = 0.2
def setup(self):
self.index = tm.makeStringIndex(1000)
self.columns = tm.makeStringIndex(30)
self.df = DataFrame(np.random.randn(1000, 30), index=self.index,
columns=self.columns)
self.idx = self.index[100]
self.col = self.columns[10]
self.df2 = DataFrame(np.random.randn(10000, 4),
columns=['A', 'B', 'C', 'D'])
self.indexer = (self.df2['B'] > 0)
self.obj_indexer = self.indexer.astype('O')
# duplicates
self.idx_dupe = (np.array(range(30)) * 99)
self.df3 = DataFrame({'A': ([0.1] * 1000), 'B': ([1] * 1000),})
self.df3 = concat([self.df3, (2 * self.df3), (3 * self.df3)])
self.df_big = DataFrame(dict(A=(['foo'] * 1000000)))
def time_get_value(self):
self.df.get_value(self.idx, self.col)
def time_get_value_ix(self):
self.df.ix[(self.idx, self.col)]
def time_getitem_scalar(self):
self.df[self.col][self.idx]
def time_boolean_rows(self):
self.df2[self.indexer]
def time_boolean_rows_object(self):
self.df2[self.obj_indexer]
def time_iloc_dups(self):
self.df3.iloc[self.idx_dupe]
def time_loc_dups(self):
self.df3.loc[self.idx_dupe]
def time_iloc_big(self):
self.df_big.iloc[:100, 0]
class IndexingMethods(object):
# GH 13166
goal_time = 0.2
def setup(self):
a = np.arange(100000)
self.ind = pd.Float64Index(a * 4.8000000418824129e-08)
self.s = Series(np.random.rand(100000))
self.ts = Series(np.random.rand(100000),
index=date_range('2011-01-01', freq='S', periods=100000))
self.indexer = ([True, False, True, True, False] * 20000)
def time_get_loc_float(self):
self.ind.get_loc(0)
def time_take_dtindex(self):
self.ts.take(self.indexer)
def time_take_intindex(self):
self.s.take(self.indexer)
class MultiIndexing(object):
goal_time = 0.2
def setup(self):
self.mi = MultiIndex.from_tuples([(x, y) for x in range(1000) for y in range(1000)])
self.s = Series(np.random.randn(1000000), index=self.mi)
self.df = DataFrame(self.s)
# slicers
np.random.seed(1234)
self.idx = pd.IndexSlice
self.n = 100000
self.mdt = pandas.DataFrame()
self.mdt['A'] = np.random.choice(range(10000, 45000, 1000), self.n)
self.mdt['B'] = np.random.choice(range(10, 400), self.n)
self.mdt['C'] = np.random.choice(range(1, 150), self.n)
self.mdt['D'] = np.random.choice(range(10000, 45000), self.n)
self.mdt['x'] = np.random.choice(range(400), self.n)
self.mdt['y'] = np.random.choice(range(25), self.n)
self.test_A = 25000
self.test_B = 25
self.test_C = 40
self.test_D = 35000
self.eps_A = 5000
self.eps_B = 5
self.eps_C = 5
self.eps_D = 5000
self.mdt2 = self.mdt.set_index(['A', 'B', 'C', 'D']).sortlevel()
self.miint = MultiIndex.from_product(
[np.arange(1000),
np.arange(1000)], names=['one', 'two'])
import string
self.mi_large = MultiIndex.from_product(
[np.arange(1000), np.arange(20), list(string.ascii_letters)],
names=['one', 'two', 'three'])
self.mi_med = MultiIndex.from_product(
[np.arange(1000), np.arange(10), list('A')],
names=['one', 'two', 'three'])
self.mi_small = MultiIndex.from_product(
[np.arange(100), list('A'), list('A')],
names=['one', 'two', 'three'])
rng = np.random.RandomState(4)
size = 1 << 16
self.mi_unused_levels = pd.MultiIndex.from_arrays([
rng.randint(0, 1 << 13, size),
rng.randint(0, 1 << 10, size)])[rng.rand(size) < 0.1]
def time_series_xs_mi_ix(self):
self.s.ix[999]
def time_frame_xs_mi_ix(self):
self.df.ix[999]
def time_multiindex_slicers(self):
self.mdt2.loc[self.idx[
(self.test_A - self.eps_A):(self.test_A + self.eps_A),
(self.test_B - self.eps_B):(self.test_B + self.eps_B),
(self.test_C - self.eps_C):(self.test_C + self.eps_C),
(self.test_D - self.eps_D):(self.test_D + self.eps_D)], :]
def time_multiindex_get_indexer(self):
self.miint.get_indexer(
np.array([(0, 10), (0, 11), (0, 12),
(0, 13), (0, 14), (0, 15),
(0, 16), (0, 17), (0, 18),
(0, 19)], dtype=object))
def time_multiindex_large_get_loc(self):
self.mi_large.get_loc((999, 19, 'Z'))
def time_multiindex_large_get_loc_warm(self):
for _ in range(1000):
self.mi_large.get_loc((999, 19, 'Z'))
def time_multiindex_med_get_loc(self):
self.mi_med.get_loc((999, 9, 'A'))
def time_multiindex_med_get_loc_warm(self):
for _ in range(1000):
self.mi_med.get_loc((999, 9, 'A'))
def time_multiindex_string_get_loc(self):
self.mi_small.get_loc((99, 'A', 'A'))
def time_multiindex_small_get_loc_warm(self):
for _ in range(1000):
self.mi_small.get_loc((99, 'A', 'A'))
def time_is_monotonic(self):
self.miint.is_monotonic
def time_remove_unused_levels(self):
self.mi_unused_levels.remove_unused_levels()
class IntervalIndexing(object):
goal_time = 0.2
def setup(self):
self.monotonic = Series(np.arange(1000000),
index=IntervalIndex.from_breaks(np.arange(1000001)))
def time_getitem_scalar(self):
self.monotonic[80000]
def time_loc_scalar(self):
self.monotonic.loc[80000]
def time_getitem_list(self):
self.monotonic[80000:]
def time_loc_list(self):
self.monotonic.loc[80000:]
class PanelIndexing(object):
goal_time = 0.2
def setup(self):
self.p = Panel(np.random.randn(100, 100, 100))
self.inds = range(0, 100, 10)
def time_subset(self):
self.p.ix[(self.inds, self.inds, self.inds)]
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 14 | 20805 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test the Ridge estimator on a toy regression problem
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that it works with both dense and sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clockwise and
# the prediction for this point should flip
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'auto' can handle negative labels.
clf = RidgeClassifier(class_weight='auto')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight='auto' and class_weight=None should give the same
# values when y has an equal number of each label
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='auto')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weight to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test RidgeCV's store_cv_values attribute.
rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if an unrecognized solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
appapantula/deeppy | examples/convnet_mnist.py | 9 | 2988 | #!/usr/bin/env python
"""
Convnets for image classification (1)
=====================================
"""
import numpy as np
import deeppy as dp
import matplotlib
import matplotlib.pyplot as plt
# Fetch MNIST data
dataset = dp.dataset.MNIST()
x_train, y_train, x_test, y_test = dataset.data(dp_dtypes=True)
# Bring images to BCHW format
x_train = x_train[:, np.newaxis, :, :]
x_test = x_test[:, np.newaxis, :, :]
# Normalize pixel intensities
scaler = dp.StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# Prepare network inputs
batch_size = 128
train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size)
test_input = dp.Input(x_test)
# Setup network
def pool_layer():
return dp.Pool(
win_shape=(2, 2),
strides=(2, 2),
border_mode='valid',
method='max',
)
def conv_layer(n_filters):
return dp.Convolution(
n_filters=n_filters,
filter_shape=(5, 5),
border_mode='valid',
weights=dp.Parameter(dp.AutoFiller(gain=1.39),
weight_decay=0.0005),
)
weight_gain_fc = 1.84
weight_decay_fc = 0.002
net = dp.NeuralNetwork(
layers=[
conv_layer(32),
dp.ReLU(),
pool_layer(),
conv_layer(64),
dp.ReLU(),
pool_layer(),
dp.Flatten(),
dp.DropoutFullyConnected(
n_out=512,
dropout=0.5,
weights=dp.Parameter(dp.AutoFiller(weight_gain_fc),
weight_decay=weight_decay_fc),
),
dp.ReLU(),
dp.FullyConnected(
n_out=dataset.n_classes,
weights=dp.Parameter(dp.AutoFiller(weight_gain_fc)),
),
],
loss=dp.SoftmaxCrossEntropy(),
)
# Train network
n_epochs = [50, 15, 15]
learn_rate = 0.05
momentum = 0.88
for i, epochs in enumerate(n_epochs):
trainer = dp.StochasticGradientDescent(
max_epochs=epochs, learn_rule=dp.Momentum(learn_rate=learn_rate/10**i,
momentum=momentum),
)
trainer.train(net, train_input)
# Plot misclassified images.
def plot_img(img, title):
plt.figure()
plt.imshow(img, cmap='gray', interpolation='nearest')
plt.title(title)
plt.axis('off')
plt.tight_layout()
errors = net.predict(x_test) != y_test
n_errors = np.sum(errors)
x_errors = np.squeeze(x_test[errors])
plot_img(dp.misc.img_tile(dp.misc.img_stretch(x_errors), aspect_ratio=0.6),
'All %i misclassified digits' % n_errors)
# Plot convolutional filters.
filters = [l.weights.array for l in net.layers
if isinstance(l, dp.Convolution)]
fig = plt.figure()
gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[1, 3])
for i, f in enumerate(filters):
ax = plt.subplot(gs[i])
ax.imshow(dp.misc.conv_filter_tile(f), cmap='gray',
interpolation='nearest')
ax.set_title('Conv layer %i' % i)
ax.axis('off')
plt.tight_layout()
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_pdf.py | 6 | 95758 | # -*- coding: utf-8 -*-
"""
A PDF matplotlib backend
Author: Jouni K Seppänen <[email protected]>
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import codecs
import os
import re
import struct
import sys
import time
import warnings
import zlib
from io import BytesIO
import numpy as np
from six import unichr
from datetime import datetime
from math import ceil, cos, floor, pi, sin
import matplotlib
from matplotlib import __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (RendererBase, GraphicsContextBase,
FigureManagerBase, FigureCanvasBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.cbook import (Bunch, is_string_like, get_realpath_and_stat,
is_writable_file_like, maxdict)
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font, get_font
from matplotlib.afm import AFM
import matplotlib.type1font as type1font
import matplotlib.dviread as dviread
from matplotlib.ft2font import (FIXED_WIDTH, ITALIC, LOAD_NO_SCALE,
LOAD_NO_HINTING, KERNING_UNFITTED)
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D, BboxBase
from matplotlib.path import Path
from matplotlib import _path
from matplotlib import _png
from matplotlib import ttconv
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g., font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
# TODOs:
#
# * encoding of fonts, including mathtext fonts and unicode support
# * TTF support has lots of small TODOs, e.g., how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_markers, draw_line_collection, etc.
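# A minimal usage sketch of the flow described above (illustrative only; the
# helper name below is hypothetical and not part of the backend), assuming an
# in-memory buffer is an acceptable output target. Values handed to
# PdfFile.output() are mapped to PDF syntax by pdfRepr(), and page content is
# bracketed by the stream machinery behind newPage()/close().
def _example_pdf_flow():
    buf = BytesIO()
    pdf = PdfFile(buf)                 # writes the %PDF-1.4 header
    pdf.newPage(8.5, 11)               # reserves a page, opens its content stream
    pdf.output(2.0, Op.setlinewidth)   # passes through pdfRepr() into the stream
    pdf.close()                        # writes fonts, xref table and trailer
    return buf.getvalue()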
def fill(strings, linelen=75):
"""Make one string from sequence of strings, with whitespace
in between. The whitespace is chosen to form lines of at most
linelen characters, if possible."""
currpos = 0
lasti = 0
result = []
for i, s in enumerate(strings):
length = len(s)
if currpos + length < linelen:
currpos += length + 1
else:
result.append(b' '.join(strings[lasti:i]))
lasti = i
currpos = length
result.append(b' '.join(strings[lasti:]))
return b'\n'.join(result)
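# Usage sketch for fill() (illustrative only; the helper name is hypothetical):
# byte strings are joined with spaces and wrapped at roughly `linelen` chars.
def _example_fill_usage():
    wrapped = fill([b'one', b'two', b'three'], linelen=9)
    assert wrapped == b'one two\nthree'
    return wrapped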
# PDF strings are supposed to be able to include any eight-bit data,
# except that unbalanced parens and backslashes must be escaped by a
# backslash. However, sf bug #2708559 shows that the carriage return
# character may get read as a newline; these characters correspond to
# \gamma and \Omega in TeX's math font encoding. Escaping them fixes
# the bug.
_string_escape_regex = re.compile(br'([\\()\r\n])')
def _string_escape(match):
m = match.group(0)
if m in br'\()':
return b'\\' + m
elif m == b'\n':
return br'\n'
elif m == b'\r':
return br'\r'
assert False
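# Escaping sketch (illustrative only; the helper name is hypothetical): the
# substitution used by pdfRepr() below escapes backslashes and parentheses and
# rewrites CR/LF as the two-character sequences \r and \n.
def _example_string_escape():
    escaped = _string_escape_regex.sub(_string_escape, b'a(b)\\c\r\n')
    assert escaped == b'a\\(b\\)\\\\c\\r\\n'
    return escaped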
def pdfRepr(obj):
"""Map Python objects to PDF syntax."""
# Some objects defined later have their own pdfRepr method.
if hasattr(obj, 'pdfRepr'):
return obj.pdfRepr()
# Floats. PDF does not have exponential notation (1.0e-10) so we
# need to use %f with some precision. Perhaps the precision
# should adapt to the magnitude of the number?
elif isinstance(obj, (float, np.floating)):
if not np.isfinite(obj):
raise ValueError("Can only output finite numbers in PDF")
r = ("%.10f" % obj).encode('ascii')
return r.rstrip(b'0').rstrip(b'.')
# Booleans. Needs to be tested before integers since
# isinstance(True, int) is true.
elif isinstance(obj, bool):
return [b'false', b'true'][obj]
# Integers are written as such.
elif isinstance(obj, (six.integer_types, np.integer)):
return ("%d" % obj).encode('ascii')
# Unicode strings are encoded in UTF-16BE with byte-order mark.
elif isinstance(obj, six.text_type):
try:
# But maybe it's really ASCII?
s = obj.encode('ASCII')
return pdfRepr(s)
except UnicodeEncodeError:
s = codecs.BOM_UTF16_BE + obj.encode('UTF-16BE')
return pdfRepr(s)
# Strings are written in parentheses, with backslashes and parens
# escaped. Actually balanced parens are allowed, but it is
# simpler to escape them all. TODO: cut long strings into lines;
# I believe there is some maximum line length in PDF.
elif isinstance(obj, bytes):
return b'(' + _string_escape_regex.sub(_string_escape, obj) + b')'
# Dictionaries. The keys must be PDF names, so if we find strings
# there, we make Name objects from them. The values may be
# anything, so the caller must ensure that PDF names are
# represented as Name objects.
elif isinstance(obj, dict):
r = [b"<<"]
r.extend([Name(key).pdfRepr() + b" " + pdfRepr(val)
for key, val in six.iteritems(obj)])
r.append(b">>")
return fill(r)
# Lists.
elif isinstance(obj, (list, tuple)):
r = [b"["]
r.extend([pdfRepr(val) for val in obj])
r.append(b"]")
return fill(r)
# The null keyword.
elif obj is None:
return b'null'
# A date.
elif isinstance(obj, datetime):
r = obj.strftime('D:%Y%m%d%H%M%S')
if time.daylight:
z = time.altzone
else:
z = time.timezone
if z == 0:
r += 'Z'
elif z < 0:
r += "+%02d'%02d'" % ((-z) // 3600, (-z) % 3600)
else:
r += "-%02d'%02d'" % (z // 3600, z % 3600)
return pdfRepr(r)
# A bounding box
elif isinstance(obj, BboxBase):
return fill([pdfRepr(val) for val in obj.bounds])
else:
msg = "Don't know a PDF representation for %s objects." % type(obj)
raise TypeError(msg)
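# Usage sketch for pdfRepr() (illustrative only; the helper name is
# hypothetical): a few basic Python values and their PDF spellings.
def _example_pdfRepr_usage():
    assert pdfRepr(True) == b'true'
    assert pdfRepr(42) == b'42'
    assert pdfRepr(1.5) == b'1.5'          # trailing zeros are stripped
    assert pdfRepr(None) == b'null'
    assert pdfRepr(b'abc') == b'(abc)'     # strings are parenthesized
    # dict keys become PDF names (the Name class is defined below)
    assert pdfRepr({'Type': Name('Catalog')}) == b'<< /Type /Catalog >>'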
class Reference(object):
"""PDF reference object.
Use PdfFile.reserveObject() to create References.
"""
def __init__(self, id):
self.id = id
def __repr__(self):
return "<Reference %d>" % self.id
def pdfRepr(self):
return ("%d 0 R" % self.id).encode('ascii')
def write(self, contents, file):
write = file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
write(pdfRepr(contents))
write(b"\nendobj\n")
class Name(object):
"""PDF name object."""
__slots__ = ('name',)
_regex = re.compile(r'[^!-~]')
def __init__(self, name):
if isinstance(name, Name):
self.name = name.name
else:
if isinstance(name, bytes):
name = name.decode('ascii')
self.name = self._regex.sub(Name.hexify, name).encode('ascii')
def __repr__(self):
return "<Name %s>" % self.name
def __str__(self):
return '/' + six.text_type(self.name)
@staticmethod
def hexify(match):
return '#%02x' % ord(match.group())
def pdfRepr(self):
return b'/' + self.name
class Operator(object):
"""PDF operator object."""
__slots__ = ('op',)
def __init__(self, op):
self.op = op
def __repr__(self):
return '<Operator %s>' % self.op
def pdfRepr(self):
return self.op
class Verbatim(object):
"""Store verbatim PDF command content for later inclusion in the
stream."""
def __init__(self, x):
self._x = x
def pdfRepr(self):
return self._x
# PDF operators (not an exhaustive list)
_pdfops = dict(
close_fill_stroke=b'b', fill_stroke=b'B', fill=b'f', closepath=b'h',
close_stroke=b's', stroke=b'S', endpath=b'n', begin_text=b'BT',
end_text=b'ET', curveto=b'c', rectangle=b're', lineto=b'l', moveto=b'm',
concat_matrix=b'cm', use_xobject=b'Do', setgray_stroke=b'G',
setgray_nonstroke=b'g', setrgb_stroke=b'RG', setrgb_nonstroke=b'rg',
setcolorspace_stroke=b'CS', setcolorspace_nonstroke=b'cs',
setcolor_stroke=b'SCN', setcolor_nonstroke=b'scn', setdash=b'd',
setlinejoin=b'j', setlinecap=b'J', setgstate=b'gs', gsave=b'q',
grestore=b'Q', textpos=b'Td', selectfont=b'Tf', textmatrix=b'Tm',
show=b'Tj', showkern=b'TJ', setlinewidth=b'w', clip=b'W', shading=b'sh')
Op = Bunch(**dict([(name, Operator(value))
for name, value in six.iteritems(_pdfops)]))
def _paint_path(fill, stroke):
"""Return the PDF operator to paint a path in the following way:
fill: fill the path with the fill color
stroke: stroke the outline of the path with the line color"""
if stroke:
if fill:
return Op.fill_stroke
else:
return Op.stroke
else:
if fill:
return Op.fill
else:
return Op.endpath
Op.paint_path = _paint_path
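# Sketch of the mapping documented above (illustrative only; the helper name
# is hypothetical): which path-painting operator each fill/stroke combination
# selects.
def _example_paint_path_usage():
    assert Op.paint_path(True, True) is Op.fill_stroke
    assert Op.paint_path(True, False) is Op.fill
    assert Op.paint_path(False, True) is Op.stroke
    assert Op.paint_path(False, False) is Op.endpath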
class Stream(object):
"""PDF stream object.
This has no pdfRepr method. Instead, call begin(), then output the
contents of the stream by calling write(), and finally call end().
"""
__slots__ = ('id', 'len', 'pdfFile', 'file', 'compressobj', 'extra', 'pos')
def __init__(self, id, len, file, extra=None, png=None):
"""id: object id of stream; len: an unused Reference object for the
length of the stream, or None (to use a memory buffer); file:
a PdfFile; extra: a dictionary of extra key-value pairs to
include in the stream header; png: if the data is already
png compressed, the decode parameters"""
self.id = id # object id
self.len = len # id of length object
self.pdfFile = file
self.file = file.fh # file to which the stream is written
self.compressobj = None # compression object
if extra is None:
self.extra = dict()
else:
self.extra = extra.copy()
if png is not None:
self.extra.update({'Filter': Name('FlateDecode'),
'DecodeParms': png})
self.pdfFile.recordXref(self.id)
if rcParams['pdf.compression'] and not png:
self.compressobj = zlib.compressobj(rcParams['pdf.compression'])
if self.len is None:
self.file = BytesIO()
else:
self._writeHeader()
self.pos = self.file.tell()
def _writeHeader(self):
write = self.file.write
write(("%d 0 obj\n" % self.id).encode('ascii'))
dict = self.extra
dict['Length'] = self.len
if rcParams['pdf.compression']:
dict['Filter'] = Name('FlateDecode')
write(pdfRepr(dict))
write(b"\nstream\n")
def end(self):
"""Finalize stream."""
self._flush()
if self.len is None:
contents = self.file.getvalue()
self.len = len(contents)
self.file = self.pdfFile.fh
self._writeHeader()
self.file.write(contents)
self.file.write(b"\nendstream\nendobj\n")
else:
length = self.file.tell() - self.pos
self.file.write(b"\nendstream\nendobj\n")
self.pdfFile.writeObject(self.len, length)
def write(self, data):
"""Write some data on the stream."""
if self.compressobj is None:
self.file.write(data)
else:
compressed = self.compressobj.compress(data)
self.file.write(compressed)
def _flush(self):
"""Flush the compression object."""
if self.compressobj is not None:
compressed = self.compressobj.flush()
self.file.write(compressed)
self.compressobj = None
class PdfFile(object):
"""PDF file object."""
def __init__(self, filename):
self.nextObject = 1 # next free object id
self.xrefTable = [[0, 65535, 'the zero object']]
self.passed_in_file_object = False
self.original_file_like = None
self.tell_base = 0
if is_string_like(filename):
fh = open(filename, 'wb')
elif is_writable_file_like(filename):
try:
self.tell_base = filename.tell()
except IOError:
fh = BytesIO()
self.original_file_like = filename
else:
fh = filename
self.passed_in_file_object = True
else:
raise ValueError("filename must be a path or a file-like object")
self._core14fontdir = os.path.join(
rcParams['datapath'], 'fonts', 'pdfcorefonts')
self.fh = fh
self.currentstream = None # stream object to write to, if any
fh.write(b"%PDF-1.4\n") # 1.4 is the first version to have alpha
# Output some eight-bit chars as a comment so various utilities
# recognize the file as binary by looking at the first few
# lines (see note in section 3.4.1 of the PDF reference).
fh.write(b"%\254\334 \253\272\n")
self.rootObject = self.reserveObject('root')
self.pagesObject = self.reserveObject('pages')
self.pageList = []
self.fontObject = self.reserveObject('fonts')
self.alphaStateObject = self.reserveObject('extended graphics states')
self.hatchObject = self.reserveObject('tiling patterns')
self.gouraudObject = self.reserveObject('Gouraud triangles')
self.XObjectObject = self.reserveObject('external objects')
self.resourceObject = self.reserveObject('resources')
root = {'Type': Name('Catalog'),
'Pages': self.pagesObject}
self.writeObject(self.rootObject, root)
revision = ''
self.infoDict = {
'Creator': 'matplotlib %s, http://matplotlib.org' % __version__,
'Producer': 'matplotlib pdf backend%s' % revision,
'CreationDate': datetime.today()
}
self.fontNames = {} # maps filenames to internal font names
self.nextFont = 1 # next free internal font name
self.dviFontInfo = {} # information on dvi fonts
# differently encoded Type-1 fonts may share the same descriptor
self.type1Descriptors = {}
self.used_characters = {}
self.alphaStates = {} # maps alpha values to graphics state objects
self.nextAlphaState = 1
self.hatchPatterns = {}
self.nextHatch = 1
self.gouraudTriangles = []
self._images = {}
self.nextImage = 1
self.markers = {}
self.multi_byte_charprocs = {}
self.paths = []
self.pageAnnotations = [] # A list of annotations for the
# current page
# The PDF spec recommends to include every procset
procsets = [Name(x)
for x in "PDF Text ImageB ImageC ImageI".split()]
# Write resource dictionary.
# Possibly TODO: more general ExtGState (graphics state dictionaries)
# ColorSpace Pattern Shading Properties
resources = {'Font': self.fontObject,
'XObject': self.XObjectObject,
'ExtGState': self.alphaStateObject,
'Pattern': self.hatchObject,
'Shading': self.gouraudObject,
'ProcSet': procsets}
self.writeObject(self.resourceObject, resources)
def newPage(self, width, height):
self.endStream()
self.width, self.height = width, height
contentObject = self.reserveObject('page contents')
thePage = {'Type': Name('Page'),
'Parent': self.pagesObject,
'Resources': self.resourceObject,
'MediaBox': [0, 0, 72 * width, 72 * height],
'Contents': contentObject,
'Group': {'Type': Name('Group'),
'S': Name('Transparency'),
'CS': Name('DeviceRGB')},
'Annots': self.pageAnnotations,
}
pageObject = self.reserveObject('page')
self.writeObject(pageObject, thePage)
self.pageList.append(pageObject)
self.beginStream(contentObject.id,
self.reserveObject('length of content stream'))
# Initialize the pdf graphics state to match the default mpl
# graphics context: currently only the join style needs to be set
self.output(GraphicsContextPdf.joinstyles['round'], Op.setlinejoin)
# Clear the list of annotations for the next page
self.pageAnnotations = []
def newTextnote(self, text, positionRect=[-100, -100, 0, 0]):
# Create a new annotation of type text
theNote = {'Type': Name('Annot'),
'Subtype': Name('Text'),
'Contents': text,
'Rect': positionRect,
}
annotObject = self.reserveObject('annotation')
self.writeObject(annotObject, theNote)
self.pageAnnotations.append(annotObject)
def close(self):
self.endStream()
# Write out the various deferred objects
self.writeFonts()
self.writeObject(self.alphaStateObject,
dict([(val[0], val[1])
for val in six.itervalues(self.alphaStates)]))
self.writeHatches()
self.writeGouraudTriangles()
xobjects = dict(x[1:] for x in six.itervalues(self._images))
for tup in six.itervalues(self.markers):
xobjects[tup[0]] = tup[1]
for name, value in six.iteritems(self.multi_byte_charprocs):
xobjects[name] = value
for name, path, trans, ob, join, cap, padding, filled, stroked \
in self.paths:
xobjects[name] = ob
self.writeObject(self.XObjectObject, xobjects)
self.writeImages()
self.writeMarkers()
self.writePathCollectionTemplates()
self.writeObject(self.pagesObject,
{'Type': Name('Pages'),
'Kids': self.pageList,
'Count': len(self.pageList)})
self.writeInfoDict()
# Finalize the file
self.writeXref()
self.writeTrailer()
if self.passed_in_file_object:
self.fh.flush()
elif self.original_file_like is not None:
self.original_file_like.write(self.fh.getvalue())
self.fh.close()
else:
self.fh.close()
def write(self, data):
if self.currentstream is None:
self.fh.write(data)
else:
self.currentstream.write(data)
def output(self, *data):
self.write(fill([pdfRepr(x) for x in data]))
self.write(b'\n')
def beginStream(self, id, len, extra=None, png=None):
assert self.currentstream is None
self.currentstream = Stream(id, len, self, extra, png)
def endStream(self):
if self.currentstream is not None:
self.currentstream.end()
self.currentstream = None
def fontName(self, fontprop):
"""
Select a font based on fontprop and return a name suitable for
Op.selectfont. If fontprop is a string, it will be interpreted
as the filename (or dvi name) of the font.
"""
if is_string_like(fontprop):
filename = fontprop
elif rcParams['pdf.use14corefonts']:
filename = findfont(
fontprop, fontext='afm', directory=self._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm', directory=self._core14fontdir)
else:
filename = findfont(fontprop)
Fx = self.fontNames.get(filename)
if Fx is None:
Fx = Name('F%d' % self.nextFont)
self.fontNames[filename] = Fx
self.nextFont += 1
matplotlib.verbose.report(
'Assigning font %s = %r' % (Fx, filename),
'debug')
return Fx
def writeFonts(self):
fonts = {}
for filename, Fx in six.iteritems(self.fontNames):
matplotlib.verbose.report('Embedding font %s' % filename, 'debug')
if filename.endswith('.afm'):
# from pdf.use14corefonts
matplotlib.verbose.report('Writing AFM font', 'debug')
fonts[Fx] = self._write_afm_font(filename)
elif filename in self.dviFontInfo:
# a Type 1 font from a dvi file;
# the filename is really the TeX name
matplotlib.verbose.report('Writing Type-1 font', 'debug')
fonts[Fx] = self.embedTeXFont(filename,
self.dviFontInfo[filename])
else:
# a normal TrueType font
matplotlib.verbose.report('Writing TrueType font', 'debug')
realpath, stat_key = get_realpath_and_stat(filename)
chars = self.used_characters.get(stat_key)
if chars is not None and len(chars[1]):
fonts[Fx] = self.embedTTF(realpath, chars[1])
self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
with open(filename, 'rb') as fh:
font = AFM(fh)
fontname = font.get_fontname()
fontdict = {'Type': Name('Font'),
'Subtype': Name('Type1'),
'BaseFont': Name(fontname),
'Encoding': Name('WinAnsiEncoding')}
fontdictObject = self.reserveObject('font dictionary')
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def embedTeXFont(self, texname, fontinfo):
msg = ('Embedding TeX font ' + texname + ' - fontinfo=' +
repr(fontinfo.__dict__))
matplotlib.verbose.report(msg, 'debug')
# Widths
widthsObject = self.reserveObject('font widths')
self.writeObject(widthsObject, fontinfo.dvifont.widths)
# Font dictionary
fontdictObject = self.reserveObject('font dictionary')
fontdict = {
'Type': Name('Font'),
'Subtype': Name('Type1'),
'FirstChar': 0,
'LastChar': len(fontinfo.dvifont.widths) - 1,
'Widths': widthsObject,
}
# Encoding (if needed)
if fontinfo.encodingfile is not None:
enc = dviread.Encoding(fontinfo.encodingfile)
differencesArray = [Name(ch) for ch in enc]
differencesArray = [0] + differencesArray
fontdict['Encoding'] = \
{'Type': Name('Encoding'),
'Differences': differencesArray}
# If no file is specified, stop short
if fontinfo.fontfile is None:
msg = ('Because of TeX configuration (pdftex.map, see updmap '
'option pdftexDownloadBase14) the font {0} is not '
'embedded. This is deprecated as of PDF 1.5 and it may '
'cause the consumer application to show something that '
'was not intended.').format(fontinfo.basefont)
warnings.warn(msg)
fontdict['BaseFont'] = Name(fontinfo.basefont)
self.writeObject(fontdictObject, fontdict)
return fontdictObject
# We have a font file to embed - read it in and apply any effects
t1font = type1font.Type1Font(fontinfo.fontfile)
if fontinfo.effects:
t1font = t1font.transform(fontinfo.effects)
fontdict['BaseFont'] = Name(t1font.prop['FontName'])
# Font descriptors may be shared between differently encoded
# Type-1 fonts, so only create a new descriptor if there is no
# existing descriptor for this font.
effects = (fontinfo.effects.get('slant', 0.0),
fontinfo.effects.get('extend', 1.0))
fontdesc = self.type1Descriptors.get((fontinfo.fontfile, effects))
if fontdesc is None:
fontdesc = self.createType1Descriptor(t1font, fontinfo.fontfile)
self.type1Descriptors[(fontinfo.fontfile, effects)] = fontdesc
fontdict['FontDescriptor'] = fontdesc
self.writeObject(fontdictObject, fontdict)
return fontdictObject
def createType1Descriptor(self, t1font, fontfile):
# Create and write the font descriptor and the font file
# of a Type-1 font
fontdescObject = self.reserveObject('font descriptor')
fontfileObject = self.reserveObject('font file')
italic_angle = t1font.prop['ItalicAngle']
fixed_pitch = t1font.prop['isFixedPitch']
flags = 0
# fixed width
if fixed_pitch:
flags |= 1 << 0
# TODO: serif
if 0:
flags |= 1 << 1
# TODO: symbolic (most TeX fonts are)
if 1:
flags |= 1 << 2
# non-symbolic
else:
flags |= 1 << 5
# italic
if italic_angle:
flags |= 1 << 6
# TODO: all caps
if 0:
flags |= 1 << 16
# TODO: small caps
if 0:
flags |= 1 << 17
# TODO: force bold
if 0:
flags |= 1 << 18
ft2font = get_font(fontfile)
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': Name(t1font.prop['FontName']),
'Flags': flags,
'FontBBox': ft2font.bbox,
'ItalicAngle': italic_angle,
'Ascent': ft2font.ascender,
'Descent': ft2font.descender,
'CapHeight': 1000, # TODO: find this out
'XHeight': 500, # TODO: this one too
'FontFile': fontfileObject,
'FontFamily': t1font.prop['FamilyName'],
'StemV': 50, # TODO
# (see also revision 3874; but not all TeX distros have AFM files!)
# 'FontWeight': a number where 400 = Regular, 700 = Bold
}
self.writeObject(fontdescObject, descriptor)
self.beginStream(fontfileObject.id, None,
{'Length1': len(t1font.parts[0]),
'Length2': len(t1font.parts[1]),
'Length3': 0})
self.currentstream.write(t1font.parts[0])
self.currentstream.write(t1font.parts[1])
self.endStream()
return fontdescObject
def _get_xobject_symbol_name(self, filename, symbol_name):
return "%s-%s" % (
os.path.splitext(os.path.basename(filename))[0],
symbol_name)
_identityToUnicodeCMap = """/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
def embedTTF(self, filename, characters):
"""Embed the TTF font from the named file into the document."""
font = get_font(filename)
fonttype = rcParams['pdf.fonttype']
def cvt(length, upe=font.units_per_EM, nearest=True):
"Convert font coordinates to PDF glyph coordinates"
value = length / upe * 1000
if nearest:
return np.round(value)
# Perhaps best to round away from zero for bounding
# boxes and the like
if value < 0:
return floor(value)
else:
return ceil(value)
def embedTTFType3(font, characters, descriptor):
"""The Type 3-specific part of embedding a Truetype font"""
widthsObject = self.reserveObject('font widths')
fontdescObject = self.reserveObject('font descriptor')
fontdictObject = self.reserveObject('font dictionary')
charprocsObject = self.reserveObject('character procs')
differencesArray = []
firstchar, lastchar = 0, 255
bbox = [cvt(x, nearest=False) for x in font.bbox]
fontdict = {
'Type': Name('Font'),
'BaseFont': ps_name,
'FirstChar': firstchar,
'LastChar': lastchar,
'FontDescriptor': fontdescObject,
'Subtype': Name('Type3'),
'Name': descriptor['FontName'],
'FontBBox': bbox,
'FontMatrix': [.001, 0, 0, .001, 0, 0],
'CharProcs': charprocsObject,
'Encoding': {
'Type': Name('Encoding'),
'Differences': differencesArray},
'Widths': widthsObject
}
# Make the "Widths" array
from encodings import cp1252
# The "decoding_map" was changed
# to a "decoding_table" as of Python 2.5.
if hasattr(cp1252, 'decoding_map'):
def decode_char(charcode):
return cp1252.decoding_map[charcode] or 0
else:
def decode_char(charcode):
return ord(cp1252.decoding_table[charcode])
def get_char_width(charcode):
s = decode_char(charcode)
width = font.load_char(
s, flags=LOAD_NO_SCALE | LOAD_NO_HINTING).horiAdvance
return cvt(width)
widths = [get_char_width(charcode)
for charcode in range(firstchar, lastchar+1)]
descriptor['MaxWidth'] = max(widths)
# Make the "Differences" array, sort the ccodes < 255 from
# the multi-byte ccodes, and build the whole set of glyph ids
# that we need from this font.
glyph_ids = []
differences = []
multi_byte_chars = set()
for c in characters:
ccode = c
gind = font.get_char_index(ccode)
glyph_ids.append(gind)
glyph_name = font.get_glyph_name(gind)
if ccode <= 255:
differences.append((ccode, glyph_name))
else:
multi_byte_chars.add(glyph_name)
differences.sort()
last_c = -2
for c, name in differences:
if c != last_c + 1:
differencesArray.append(c)
differencesArray.append(Name(name))
last_c = c
# Make the charprocs array (using ttconv to generate the
# actual outlines)
rawcharprocs = ttconv.get_pdf_charprocs(
filename.encode(sys.getfilesystemencoding()), glyph_ids)
charprocs = {}
for charname, stream in six.iteritems(rawcharprocs):
charprocDict = {'Length': len(stream)}
# The 2-byte characters are used as XObjects, so they
# need extra info in their dictionary
if charname in multi_byte_chars:
charprocDict['Type'] = Name('XObject')
charprocDict['Subtype'] = Name('Form')
charprocDict['BBox'] = bbox
# Each glyph includes bounding box information,
# but xpdf and ghostscript can't handle it in a
# Form XObject (they segfault!!!), so we remove it
# from the stream here. It's not needed anyway,
# since the Form XObject includes it in its BBox
# value.
stream = stream[stream.find(b"d1") + 2:]
charprocObject = self.reserveObject('charProc')
self.beginStream(charprocObject.id, None, charprocDict)
self.currentstream.write(stream)
self.endStream()
# Send the glyphs with ccode > 255 to the XObject dictionary,
# and the others to the font itself
if charname in multi_byte_chars:
name = self._get_xobject_symbol_name(filename, charname)
self.multi_byte_charprocs[name] = charprocObject
else:
charprocs[charname] = charprocObject
# Write everything out
self.writeObject(fontdictObject, fontdict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(widthsObject, widths)
self.writeObject(charprocsObject, charprocs)
return fontdictObject
def embedTTFType42(font, characters, descriptor):
"""The Type 42-specific part of embedding a Truetype font"""
fontdescObject = self.reserveObject('font descriptor')
cidFontDictObject = self.reserveObject('CID font dictionary')
type0FontDictObject = self.reserveObject('Type 0 font dictionary')
cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
fontfileObject = self.reserveObject('font file stream')
wObject = self.reserveObject('Type 0 widths')
toUnicodeMapObject = self.reserveObject('ToUnicode map')
cidFontDict = {
'Type': Name('Font'),
'Subtype': Name('CIDFontType2'),
'BaseFont': ps_name,
'CIDSystemInfo': {
'Registry': 'Adobe',
'Ordering': 'Identity',
'Supplement': 0},
'FontDescriptor': fontdescObject,
'W': wObject,
'CIDToGIDMap': cidToGidMapObject
}
type0FontDict = {
'Type': Name('Font'),
'Subtype': Name('Type0'),
'BaseFont': ps_name,
'Encoding': Name('Identity-H'),
'DescendantFonts': [cidFontDictObject],
'ToUnicode': toUnicodeMapObject
}
# Make fontfile stream
descriptor['FontFile2'] = fontfileObject
length1Object = self.reserveObject('decoded length of a font')
self.beginStream(
fontfileObject.id,
self.reserveObject('length of font stream'),
{'Length1': length1Object})
with open(filename, 'rb') as fontfile:
length1 = 0
while True:
data = fontfile.read(4096)
if not data:
break
length1 += len(data)
self.currentstream.write(data)
self.endStream()
self.writeObject(length1Object, length1)
# Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
# at the same time
cid_to_gid_map = ['\u0000'] * 65536
widths = []
max_ccode = 0
for c in characters:
ccode = c
gind = font.get_char_index(ccode)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
widths.append((ccode, glyph.horiAdvance / 6))
if ccode < 65536:
cid_to_gid_map[ccode] = unichr(gind)
max_ccode = max(ccode, max_ccode)
widths.sort()
cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]
last_ccode = -2
w = []
max_width = 0
unicode_groups = []
for ccode, width in widths:
if ccode != last_ccode + 1:
w.append(ccode)
w.append([width])
unicode_groups.append([ccode, ccode])
else:
w[-1].append(width)
unicode_groups[-1][1] = ccode
max_width = max(max_width, width)
last_ccode = ccode
unicode_bfrange = []
for start, end in unicode_groups:
unicode_bfrange.append(
"<%04x> <%04x> [%s]" %
(start, end,
" ".join(["<%04x>" % x for x in range(start, end+1)])))
unicode_cmap = (self._identityToUnicodeCMap %
(len(unicode_groups),
"\n".join(unicode_bfrange))).encode('ascii')
# CIDToGIDMap stream
cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
self.beginStream(cidToGidMapObject.id,
None,
{'Length': len(cid_to_gid_map)})
self.currentstream.write(cid_to_gid_map)
self.endStream()
# ToUnicode CMap
self.beginStream(toUnicodeMapObject.id,
None,
{'Length': len(unicode_cmap)})
self.currentstream.write(unicode_cmap)
self.endStream()
descriptor['MaxWidth'] = max_width
# Write everything out
self.writeObject(cidFontDictObject, cidFontDict)
self.writeObject(type0FontDictObject, type0FontDict)
self.writeObject(fontdescObject, descriptor)
self.writeObject(wObject, w)
return type0FontDictObject
# Beginning of main embedTTF function...
# You are lost in a maze of TrueType tables, all different...
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1, 0, 0, 6)].decode('macroman') # Macintosh scheme
except KeyError:
# Microsoft scheme:
ps_name = sfnt[(3, 1, 0x0409, 6)].decode('utf-16be')
# (see freetype/ttnameid.h)
ps_name = ps_name.encode('ascii', 'replace')
ps_name = Name(ps_name)
pclt = font.get_sfnt_table('pclt') or {'capHeight': 0, 'xHeight': 0}
post = font.get_sfnt_table('post') or {'italicAngle': (0, 0)}
ff = font.face_flags
sf = font.style_flags
flags = 0
symbolic = False # ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
if ff & FIXED_WIDTH:
flags |= 1 << 0
if 0: # TODO: serif
flags |= 1 << 1
if symbolic:
flags |= 1 << 2
else:
flags |= 1 << 5
if sf & ITALIC:
flags |= 1 << 6
if 0: # TODO: all caps
flags |= 1 << 16
if 0: # TODO: small caps
flags |= 1 << 17
if 0: # TODO: force bold
flags |= 1 << 18
descriptor = {
'Type': Name('FontDescriptor'),
'FontName': ps_name,
'Flags': flags,
'FontBBox': [cvt(x, nearest=False) for x in font.bbox],
'Ascent': cvt(font.ascender, nearest=False),
'Descent': cvt(font.descender, nearest=False),
'CapHeight': cvt(pclt['capHeight'], nearest=False),
'XHeight': cvt(pclt['xHeight']),
'ItalicAngle': post['italicAngle'][1], # ???
'StemV': 0 # ???
}
# Subsetting to a Type 3 font does not work for OpenType (.otf) fonts
# that embed a PostScript CFF font, so avoid that -- save as a
# (non-subsetted) Type 42 font instead.
if is_opentype_cff_font(filename):
fonttype = 42
msg = ("'%s' can not be subsetted into a Type 3 font. "
"The entire font will be embedded in the output.")
warnings.warn(msg % os.path.basename(filename))
if fonttype == 3:
return embedTTFType3(font, characters, descriptor)
elif fonttype == 42:
return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
"""Return name of an ExtGState that sets alpha to the given value"""
state = self.alphaStates.get(alpha, None)
if state is not None:
return state[0]
name = Name('A%d' % self.nextAlphaState)
self.nextAlphaState += 1
self.alphaStates[alpha] = \
(name, {'Type': Name('ExtGState'),
'CA': alpha[0], 'ca': alpha[1]})
return name
def hatchPattern(self, hatch_style):
# The colors may come in as numpy arrays, which aren't hashable
if hatch_style is not None:
edge, face, hatch = hatch_style
if edge is not None:
edge = tuple(edge)
if face is not None:
face = tuple(face)
hatch_style = (edge, face, hatch)
pattern = self.hatchPatterns.get(hatch_style, None)
if pattern is not None:
return pattern
name = Name('H%d' % self.nextHatch)
self.nextHatch += 1
self.hatchPatterns[hatch_style] = name
return name
def writeHatches(self):
hatchDict = dict()
sidelen = 72.0
for hatch_style, name in six.iteritems(self.hatchPatterns):
ob = self.reserveObject('hatch pattern')
hatchDict[name] = ob
res = {'Procsets':
[Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]}
self.beginStream(
ob.id, None,
{'Type': Name('Pattern'),
'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
'BBox': [0, 0, sidelen, sidelen],
'XStep': sidelen, 'YStep': sidelen,
'Resources': res,
# Change origin to match Agg at top-left.
'Matrix': [1, 0, 0, 1, 0, self.height * 72]})
stroke_rgb, fill_rgb, path = hatch_style
self.output(stroke_rgb[0], stroke_rgb[1], stroke_rgb[2],
Op.setrgb_stroke)
if fill_rgb is not None:
self.output(fill_rgb[0], fill_rgb[1], fill_rgb[2],
Op.setrgb_nonstroke,
0, 0, sidelen, sidelen, Op.rectangle,
Op.fill)
self.output(rcParams['hatch.linewidth'], Op.setlinewidth)
self.output(*self.pathOperations(
Path.hatch(path),
Affine2D().scale(sidelen),
simplify=False))
self.output(Op.fill_stroke)
self.endStream()
self.writeObject(self.hatchObject, hatchDict)
def addGouraudTriangles(self, points, colors):
name = Name('GT%d' % len(self.gouraudTriangles))
self.gouraudTriangles.append((name, points, colors))
return name
def writeGouraudTriangles(self):
gouraudDict = dict()
for name, points, colors in self.gouraudTriangles:
ob = self.reserveObject('Gouraud triangle')
gouraudDict[name] = ob
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 8)
points_max = np.max(flat_points, axis=0) + (1 << 8)
factor = float(0xffffffff) / (points_max - points_min)
self.beginStream(
ob.id, None,
{'ShadingType': 4,
'BitsPerCoordinate': 32,
'BitsPerComponent': 8,
'BitsPerFlag': 8,
'ColorSpace': Name('DeviceRGB'),
'AntiAlias': True,
'Decode': [points_min[0], points_max[0],
points_min[1], points_max[1],
0, 1, 0, 1, 0, 1]
})
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[(str('flags'), str('u1')),
(str('points'), str('>u4'), (2,)),
(str('colors'), str('u1'), (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
self.write(streamarr.tostring())
self.endStream()
self.writeObject(self.gouraudObject, gouraudDict)
def imageObject(self, image):
"""Return name of an image XObject representing the given image."""
entry = self._images.get(id(image), None)
if entry is not None:
return entry[1]
name = Name('I%d' % self.nextImage)
ob = self.reserveObject('image %d' % self.nextImage)
self.nextImage += 1
self._images[id(image)] = (image, name, ob)
return name
def _unpack(self, im):
"""
Unpack the image object im into height, width, data, alpha,
where data and alpha are HxWx3 (RGB) or HxWx1 (grayscale or alpha)
arrays, except alpha is None if the image is fully opaque.
"""
h, w = im.shape[:2]
im = im[::-1]
if im.ndim == 2:
return h, w, im, None
else:
rgb = im[:, :, :3]
rgb = np.array(rgb, order='C')
# PDF needs a separate alpha image
if im.shape[2] == 4:
alpha = im[:, :, 3][..., None]
if np.all(alpha == 255):
alpha = None
else:
alpha = np.array(alpha, order='C')
else:
alpha = None
return h, w, rgb, alpha
def _writePng(self, data):
"""
Write the image *data* into the pdf file using png
predictors with Flate compression.
"""
buffer = BytesIO()
_png.write_png(data, buffer)
buffer.seek(8)
written = 0
header = bytearray(8)
while True:
n = buffer.readinto(header)
assert n == 8
length, type = struct.unpack(b'!L4s', bytes(header))
if type == b'IDAT':
data = bytearray(length)
n = buffer.readinto(data)
assert n == length
self.currentstream.write(bytes(data))
written += n
elif type == b'IEND':
break
else:
buffer.seek(length, 1)
buffer.seek(4, 1) # skip CRC
def _writeImg(self, data, height, width, grayscale, id, smask=None):
"""
Write the image *data* of size *height* x *width*, as grayscale
if *grayscale* is true and RGB otherwise, as pdf object *id*
and with the soft mask (alpha channel) *smask*, which should be
either None or a *height* x *width* x 1 array.
"""
obj = {'Type': Name('XObject'),
'Subtype': Name('Image'),
'Width': width,
'Height': height,
'ColorSpace': Name('DeviceGray' if grayscale
else 'DeviceRGB'),
'BitsPerComponent': 8}
if smask:
obj['SMask'] = smask
if rcParams['pdf.compression']:
png = {'Predictor': 10,
'Colors': 1 if grayscale else 3,
'Columns': width}
else:
png = None
self.beginStream(
id,
self.reserveObject('length of image stream'),
obj,
png=png
)
if png:
self._writePng(data)
else:
self.currentstream.write(data.tostring())
self.endStream()
def writeImages(self):
for img, name, ob in six.itervalues(self._images):
height, width, data, adata = self._unpack(img)
if adata is not None:
smaskObject = self.reserveObject("smask")
self._writeImg(adata, height, width, True, smaskObject.id)
else:
smaskObject = None
self._writeImg(data, height, width, False,
ob.id, smaskObject)
def markerObject(self, path, trans, fill, stroke, lw, joinstyle,
capstyle):
"""Return name of a marker XObject representing the given path."""
# self.markers used by markerObject, writeMarkers, close:
# mapping from (path operations, fill?, stroke?) to
# [name, object reference, bounding box, linewidth]
# This enables different draw_markers calls to share the XObject
# if the gc is sufficiently similar: colors etc can vary, but
# the choices of whether to fill and whether to stroke cannot.
# We need a bounding box enclosing all of the XObject path,
# but since line width may vary, we store the maximum of all
# occurring line widths in self.markers.
# close() is somewhat tightly coupled in that it expects the
# first two components of each value in self.markers to be the
# name and object reference.
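        # For example (illustrative values only), a filled, unstroked marker
        # might be cached as
        #   self.markers[(pathops, True, False, 'round', 'butt')]
        #       = [Name('M0'), <Reference 13>, bbox, 1.0]
        # and a later call with a wider line only bumps the stored linewidth.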
pathops = self.pathOperations(path, trans, simplify=False)
key = (tuple(pathops), bool(fill), bool(stroke), joinstyle, capstyle)
result = self.markers.get(key)
if result is None:
name = Name('M%d' % len(self.markers))
ob = self.reserveObject('marker %d' % len(self.markers))
bbox = path.get_extents(trans)
self.markers[key] = [name, ob, bbox, lw]
else:
if result[-1] < lw:
result[-1] = lw
name = result[0]
return name
def writeMarkers(self):
for ((pathops, fill, stroke, joinstyle, capstyle),
(name, ob, bbox, lw)) in six.iteritems(self.markers):
bbox = bbox.padded(lw * 0.5)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': list(bbox.extents)})
self.output(GraphicsContextPdf.joinstyles[joinstyle],
Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(fill, stroke))
self.endStream()
def pathCollectionObject(self, gc, path, trans, padding, filled, stroked):
name = Name('P%d' % len(self.paths))
ob = self.reserveObject('path %d' % len(self.paths))
self.paths.append(
(name, path, trans, ob, gc.get_joinstyle(), gc.get_capstyle(),
padding, filled, stroked))
return name
def writePathCollectionTemplates(self):
for (name, path, trans, ob, joinstyle, capstyle, padding, filled,
stroked) in self.paths:
pathops = self.pathOperations(path, trans, simplify=False)
bbox = path.get_extents(trans)
if not np.all(np.isfinite(bbox.extents)):
extents = [0, 0, 0, 0]
else:
bbox = bbox.padded(padding)
extents = list(bbox.extents)
self.beginStream(
ob.id, None,
{'Type': Name('XObject'), 'Subtype': Name('Form'),
'BBox': extents})
self.output(GraphicsContextPdf.joinstyles[joinstyle],
Op.setlinejoin)
self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
self.output(*pathops)
self.output(Op.paint_path(filled, stroked))
self.endStream()
@staticmethod
def pathOperations(path, transform, clip=None, simplify=None, sketch=None):
return [Verbatim(_path.convert_to_string(
path, transform, clip, simplify, sketch,
6,
[Op.moveto.op, Op.lineto.op, b'', Op.curveto.op, Op.closepath.op],
True))]
def writePath(self, path, transform, clip=False, sketch=None):
if clip:
clip = (0.0, 0.0, self.width * 72, self.height * 72)
simplify = path.should_simplify
else:
clip = None
simplify = False
cmds = self.pathOperations(path, transform, clip, simplify=simplify,
sketch=sketch)
self.output(*cmds)
def reserveObject(self, name=''):
"""Reserve an ID for an indirect object.
The name is used for debugging in case we forget to print out
the object with writeObject.
"""
id = self.nextObject
self.nextObject += 1
self.xrefTable.append([None, 0, name])
return Reference(id)
def recordXref(self, id):
self.xrefTable[id][0] = self.fh.tell() - self.tell_base
def writeObject(self, object, contents):
self.recordXref(object.id)
object.write(contents, self)
def writeXref(self):
"""Write out the xref table."""
self.startxref = self.fh.tell() - self.tell_base
self.write(("xref\n0 %d\n" % self.nextObject).encode('ascii'))
i = 0
broken = False
for offset, generation, name in self.xrefTable:
if offset is None:
print('No offset for object %d (%s)' % (i, name),
file=sys.stderr)
broken = True
else:
if name == 'the zero object':
key = "f"
else:
key = "n"
text = "%010d %05d %s \n" % (offset, generation, key)
self.write(text.encode('ascii'))
i += 1
if broken:
raise AssertionError('Indirect object does not exist')
def writeInfoDict(self):
"""Write out the info dictionary, checking it for good form"""
def is_date(x):
return isinstance(x, datetime)
check_trapped = (lambda x: isinstance(x, Name) and
x.name in ('True', 'False', 'Unknown'))
keywords = {'Title': is_string_like,
'Author': is_string_like,
'Subject': is_string_like,
'Keywords': is_string_like,
'Creator': is_string_like,
'Producer': is_string_like,
'CreationDate': is_date,
'ModDate': is_date,
'Trapped': check_trapped}
for k in six.iterkeys(self.infoDict):
if k not in keywords:
warnings.warn('Unknown infodict keyword: %s' % k)
else:
if not keywords[k](self.infoDict[k]):
warnings.warn('Bad value for infodict keyword %s' % k)
self.infoObject = self.reserveObject('info')
self.writeObject(self.infoObject, self.infoDict)
def writeTrailer(self):
"""Write out the PDF trailer."""
self.write(b"trailer\n")
self.write(pdfRepr(
{'Size': self.nextObject,
'Root': self.rootObject,
'Info': self.infoObject}))
# Could add 'ID'
self.write(("\nstartxref\n%d\n%%%%EOF\n" %
self.startxref).encode('ascii'))
class RendererPdf(RendererBase):
afm_font_cache = maxdict(50)
def __init__(self, file, image_dpi, height, width):
RendererBase.__init__(self)
self.height = height
self.width = width
self.file = file
self.gc = self.new_gc()
self.mathtext_parser = MathTextParser("Pdf")
self.image_dpi = image_dpi
self.tex_font_map = None
def finalize(self):
self.file.output(*self.gc.finalize())
def check_gc(self, gc, fillcolor=None):
orig_fill = getattr(gc, '_fillcolor', (0., 0., 0.))
gc._fillcolor = fillcolor
orig_alphas = getattr(gc, '_effective_alphas', (1.0, 1.0))
if gc._forced_alpha:
gc._effective_alphas = (gc._alpha, gc._alpha)
elif fillcolor is None or len(fillcolor) < 4:
gc._effective_alphas = (gc._rgb[3], 1.0)
else:
gc._effective_alphas = (gc._rgb[3], fillcolor[3])
delta = self.gc.delta(gc)
if delta:
self.file.output(*delta)
# Restore gc to avoid unwanted side effects
gc._fillcolor = orig_fill
gc._effective_alphas = orig_alphas
def tex_font_mapping(self, texfont):
if self.tex_font_map is None:
self.tex_font_map = \
dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
return self.tex_font_map[texfont]
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
if isinstance(font, six.string_types):
fname = font
else:
fname = font.fname
realpath, stat_key = get_realpath_and_stat(fname)
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in six.iteritems(other):
used_characters = self.file.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def get_image_magnification(self):
return self.image_dpi/72.0
def option_scale_image(self):
"""
The pdf backend supports arbitrary scaling of images.
"""
return True
def option_image_nocomposite(self):
"""
Return whether to generate a composite image from multiple images on
a set of axes.
"""
return not rcParams['image.composite_image']
def draw_image(self, gc, x, y, im, transform=None):
h, w = im.shape[:2]
if w == 0 or h == 0:
return
if transform is None:
# If there's no transform, alpha has already been applied
gc.set_alpha(1.0)
self.check_gc(gc)
w = 72.0 * w / self.image_dpi
h = 72.0 * h / self.image_dpi
imob = self.file.imageObject(im)
if transform is None:
self.file.output(Op.gsave,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
else:
tr1, tr2, tr3, tr4, tr5, tr6 = transform.frozen().to_values()
self.file.output(Op.gsave,
1, 0, 0, 1, x, y, Op.concat_matrix,
tr1, tr2, tr3, tr4, tr5, tr6, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
self.check_gc(gc, rgbFace)
self.file.writePath(
path, transform,
rgbFace is None and gc.get_hatch_path() is None,
gc.get_sketch_params())
self.file.output(self.gc.paint())
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# We can only reuse the objects if the presence of fill and
# stroke (and the amount of alpha for each) is the same for
# all of them
can_do_optimization = True
facecolors = np.asarray(facecolors)
edgecolors = np.asarray(edgecolors)
if not len(facecolors):
filled = False
can_do_optimization = not gc.get_hatch()
else:
if np.all(facecolors[:, 3] == facecolors[0, 3]):
filled = facecolors[0, 3] != 0.0
else:
can_do_optimization = False
if not len(edgecolors):
stroked = False
else:
if np.all(np.asarray(linewidths) == 0.0):
stroked = False
elif np.all(edgecolors[:, 3] == edgecolors[0, 3]):
stroked = edgecolors[0, 3] != 0.0
else:
can_do_optimization = False
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is len_path * uses_per_path
# cost of XObject is len_path + 5 for the definition,
# uses_per_path for the uses
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + uses_per_path + 5 < len_path * uses_per_path
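        # Worked example of the trade-off above (illustrative numbers only):
        # with len_path = 10 and uses_per_path = 3, inlining costs about
        # 10 * 3 = 30 while the XObject route costs about 10 + 3 + 5 = 18,
        # so the XObject templates win; with a single use, 10 + 1 + 5 = 16
        # is not smaller than 10 * 1 = 10 and we fall back to the base class.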
if (not can_do_optimization) or (not should_do_optimization):
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
padding = np.max(linewidths)
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = self.file.pathCollectionObject(
gc, path, transform, padding, filled, stroked)
path_codes.append(name)
output = self.file.output
output(*self.gc.push())
lastx, lasty = 0, 0
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
self.check_gc(gc0, rgbFace)
dx, dy = xo - lastx, yo - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix, path_id,
Op.use_xobject)
lastx, lasty = xo, yo
output(*self.gc.pop())
def draw_markers(self, gc, marker_path, marker_trans, path, trans,
rgbFace=None):
# Same logic as in draw_path_collection
len_marker_path = len(marker_path)
uses = len(path)
if len_marker_path * uses < len_marker_path + uses + 5:
RendererBase.draw_markers(self, gc, marker_path, marker_trans,
path, trans, rgbFace)
return
self.check_gc(gc, rgbFace)
fill = gc.fill(rgbFace)
stroke = gc.stroke()
output = self.file.output
marker = self.file.markerObject(
marker_path, marker_trans, fill, stroke, self.gc._linewidth,
gc.get_joinstyle(), gc.get_capstyle())
output(Op.gsave)
lastx, lasty = 0, 0
for vertices, code in path.iter_segments(
trans,
clip=(0, 0, self.file.width*72, self.file.height*72),
simplify=False):
if len(vertices):
x, y = vertices[-2:]
if (x < 0 or y < 0 or
x > self.file.width * 72 or y > self.file.height * 72):
continue
dx, dy = x - lastx, y - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
marker, Op.use_xobject)
lastx, lasty = x, y
output(Op.grestore)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
points = points.reshape((shape[0] * shape[1], 2))
tpoints = trans.transform(points)
tpoints = tpoints.reshape(shape)
name = self.file.addGouraudTriangles(tpoints, colors)
self.check_gc(gc)
self.file.output(name, Op.shading)
def _setup_textpos(self, x, y, angle, oldx=0, oldy=0, oldangle=0):
if angle == oldangle == 0:
self.file.output(x - oldx, y - oldy, Op.textpos)
else:
angle = angle / 180.0 * pi
self.file.output(cos(angle), sin(angle),
-sin(angle), cos(angle),
x, y, Op.textmatrix)
self.file.output(0, 0, Op.textpos)
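# Illustrative note (not part of the original source): for horizontal text the
# method above emits only a relative text-position move, while rotated text
# gets a full text matrix. A hypothetical call would produce, roughly:
#   self._setup_textpos(10, 20, 0)    ->  "10 20 Td"
#   self._setup_textpos(10, 20, 90)   ->  "0 1 -1 0 10 20 Tm" followed by "0 0 Td"
# (operator names assume Op.textpos maps to Td and Op.textmatrix to Tm).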
def draw_mathtext(self, gc, x, y, s, prop, angle):
# TODO: fix positioning and encoding
width, height, descent, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
# When using Type 3 fonts, we can't use character codes higher
# than 255, so we use the "Do" command to render those
# instead.
global_fonttype = rcParams['pdf.fonttype']
# Set up a global transformation matrix for the whole math expression
a = angle / 180.0 * pi
self.file.output(Op.gsave)
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
prev_font = None, None
oldx, oldy = 0, 0
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 42 or num <= 255:
self._setup_textpos(ox, oy, 0, oldx, oldy)
oldx, oldy = ox, oy
if (fontname, fontsize) != prev_font:
self.file.output(self.file.fontName(fontname), fontsize,
Op.selectfont)
prev_font = fontname, fontsize
self.file.output(self.encode_string(unichr(num), fonttype),
Op.show)
self.file.output(Op.end_text)
# If using Type 3 fonts, render all of the multi-byte characters
# as XObjects using the 'Do' command.
if global_fonttype == 3:
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if is_opentype_cff_font(fontname):
fonttype = 42
else:
fonttype = global_fonttype
if fonttype == 3 and num > 255:
self.file.fontName(fontname)
self.file.output(Op.gsave,
0.001 * fontsize, 0,
0, 0.001 * fontsize,
ox, oy, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
fontname, symbol_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Draw any horizontal lines in the math layout
for ox, oy, width, height in rects:
self.file.output(Op.gsave, ox, oy, width, height,
Op.rectangle, Op.fill, Op.grestore)
# Pop off the global transformation
self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
dvi = dviread.Dvi(dvifile, 72)
page = next(iter(dvi))
dvi.close()
# Gather font information and do some setup for combining
# characters into strings. The variable seq will contain a
# sequence of font and text entries. A font entry is a list
# ['font', name, size] where name is a Name object for the
# font. A text entry is ['text', x, y, glyphs, x+w] where x
# and y are the starting coordinates, w is the width, and
# glyphs is a list; in this phase it will always contain just
# one one-character string, but later it may have longer
# strings interspersed with kern amounts.
oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
if dvifont != oldfont:
pdfname = self.file.fontName(dvifont.texname)
if dvifont.texname not in self.file.dviFontInfo:
psfont = self.tex_font_mapping(dvifont.texname)
self.file.dviFontInfo[dvifont.texname] = Bunch(
fontfile=psfont.filename,
basefont=psfont.psname,
encodingfile=psfont.encoding,
effects=psfont.effects,
dvifont=dvifont)
seq += [['font', pdfname, dvifont.size]]
oldfont = dvifont
# We need to convert the glyph numbers to bytes, and the easiest
# way to do this on both Python 2 and 3 is .encode('latin-1')
seq += [['text', x1, y1,
[six.unichr(glyph).encode('latin-1')], x1+width]]
# Find consecutive text strings with constant y coordinate and
# combine into a sequence of strings and kerns, or just one
# string (if any kerns would be less than 0.1 points).
i, curx, fontsize = 0, 0, None
while i < len(seq)-1:
elt, nxt = seq[i:i+2]
if elt[0] == 'font':
fontsize = elt[2]
elif elt[0] == nxt[0] == 'text' and elt[2] == nxt[2]:
offset = elt[4] - nxt[1]
if abs(offset) < 0.1:
elt[3][-1] += nxt[3][0]
elt[4] += nxt[4]-nxt[1]
else:
elt[3] += [offset*1000.0/fontsize, nxt[3][0]]
elt[4] = nxt[4]
del seq[i+1]
continue
i += 1
# Create a transform to map the dvi contents to the canvas.
mytrans = Affine2D().rotate_deg(angle).translate(x, y)
# Output the text.
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
curx, cury, oldx, oldy = 0, 0, 0, 0
for elt in seq:
if elt[0] == 'font':
self.file.output(elt[1], elt[2], Op.selectfont)
elif elt[0] == 'text':
curx, cury = mytrans.transform_point((elt[1], elt[2]))
self._setup_textpos(curx, cury, angle, oldx, oldy)
oldx, oldy = curx, cury
if len(elt[3]) == 1:
self.file.output(elt[3][0], Op.show)
else:
self.file.output(elt[3], Op.showkern)
else:
assert False
self.file.output(Op.end_text)
# Then output the boxes (e.g., variable-length lines of square
# roots).
boxgc = self.new_gc()
boxgc.copy_properties(gc)
boxgc.set_linewidth(0)
pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
for x1, y1, h, w in page.boxes:
path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
[0, 0]], pathops)
self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
if fonttype in (1, 3):
return s.encode('cp1252', 'replace')
return s.encode('utf-16be', 'replace')
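# Illustrative example (assumption; values follow the standard codecs):
#   self.encode_string(u'A\xe9', 3)   ->  b'A\xe9'            # 1-byte cp1252 for Type 1/3
#   self.encode_string(u'A\xe9', 42)  ->  b'\x00A\x00\xe9'    # 2-byte UTF-16BE for Type 42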
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# TODO: combine consecutive texts into one BT/ET delimited section
# This function is rather complex, since there is no way to
# access characters of a Type 3 font with codes > 255. (Type
# 3 fonts can not have a CIDMap). Therefore, we break the
# string into chunks, where each chunk contains exclusively
# 1-byte or exclusively 2-byte characters, and output each
# chunk a separate command. 1-byte characters use the regular
# text show command (Tj), whereas 2-byte characters use the
# use XObject command (Do). If using Type 42 fonts, all of
# this complication is avoided, but of course, those fonts can
# not be subsetted.
self.check_gc(gc, gc._rgb)
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
fontsize = prop.get_size_in_points()
if rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h = font.get_str_bbox(s)
fonttype = 1
else:
font = self._get_font_ttf(prop)
self.track_characters(font, s)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
fonttype = rcParams['pdf.fonttype']
# We can't subset all OpenType fonts, so switch to Type 42
# in that case.
if is_opentype_cff_font(font.fname):
fonttype = 42
def check_simple_method(s):
"""Determine if we should use the simple or woven method
to output this text, and chunks the string into 1-byte and
2-byte sections if necessary."""
use_simple_method = True
chunks = []
if not rcParams['pdf.use14corefonts']:
if fonttype == 3 and not isinstance(s, bytes) and len(s) != 0:
# Break the string into chunks where each chunk is either
# a string of chars <= 255, or a single character > 255.
s = six.text_type(s)
for c in s:
if ord(c) <= 255:
char_type = 1
else:
char_type = 2
if len(chunks) and chunks[-1][0] == char_type:
chunks[-1][1].append(c)
else:
chunks.append((char_type, [c]))
use_simple_method = (len(chunks) == 1 and
chunks[-1][0] == 1)
return use_simple_method, chunks
def draw_text_simple():
"""Outputs text using the simple method."""
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
self._setup_textpos(x, y, angle)
self.file.output(self.encode_string(s, fonttype), Op.show,
Op.end_text)
def draw_text_woven(chunks):
"""Outputs text using the woven method, alternating
between chunks of 1-byte characters and 2-byte characters.
Only used for Type 3 fonts."""
chunks = [(a, ''.join(b)) for a, b in chunks]
# Do the rotation and global translation as a single matrix
# concatenation up front
self.file.output(Op.gsave)
a = angle / 180.0 * pi
self.file.output(cos(a), sin(a), -sin(a), cos(a), x, y,
Op.concat_matrix)
# Output all the 1-byte characters in a BT/ET group, then
# output all the 2-byte characters.
for mode in (1, 2):
newx = oldx = 0
# Output a 1-byte character chunk
if mode == 1:
self.file.output(Op.begin_text,
self.file.fontName(prop),
fontsize,
Op.selectfont)
for chunk_type, chunk in chunks:
if mode == 1 and chunk_type == 1:
self._setup_textpos(newx, 0, 0, oldx, 0, 0)
self.file.output(self.encode_string(chunk, fonttype),
Op.show)
oldx = newx
lastgind = None
for c in chunk:
ccode = ord(c)
gind = font.get_char_index(ccode)
if gind is not None:
if mode == 2 and chunk_type == 2:
glyph_name = font.get_glyph_name(gind)
self.file.output(Op.gsave)
self.file.output(0.001 * fontsize, 0,
0, 0.001 * fontsize,
newx, 0, Op.concat_matrix)
name = self.file._get_xobject_symbol_name(
font.fname, glyph_name)
self.file.output(Name(name), Op.use_xobject)
self.file.output(Op.grestore)
# Move the pointer based on the character width
# and kerning
glyph = font.load_char(ccode,
flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(
lastgind, gind, KERNING_UNFITTED)
else:
kern = 0
lastgind = gind
newx += kern/64.0 + glyph.linearHoriAdvance/65536.0
if mode == 1:
self.file.output(Op.end_text)
self.file.output(Op.grestore)
use_simple_method, chunks = check_simple_method(s)
if use_simple_method:
return draw_text_simple()
else:
return draw_text_woven(chunks)
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
w, h, d, glyphs, rects, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
elif rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
l, b, w, h, d = font.get_str_bbox_and_descent(s)
scale = prop.get_size_in_points()
w *= scale / 1000
h *= scale / 1000
d *= scale / 1000
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
scale = (1.0 / 64.0)
w *= scale
h *= scale
d = font.get_descent()
d *= scale
return w, h, d
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afm_font_cache.get(key)
if font is None:
filename = findfont(
prop, fontext='afm', directory=self.file._core14fontdir)
if filename is None:
filename = findfont(
"Helvetica", fontext='afm',
directory=self.file._core14fontdir)
font = self.afm_font_cache.get(filename)
if font is None:
with open(filename, 'rb') as fh:
font = AFM(fh)
self.afm_font_cache[filename] = font
self.afm_font_cache[key] = font
return font
def _get_font_ttf(self, prop):
filename = findfont(prop)
font = get_font(filename)
font.clear()
font.set_size(prop.get_size_in_points(), 72)
return font
def flipy(self):
return False
def get_canvas_width_height(self):
return self.file.width * 72.0, self.file.height * 72.0
def new_gc(self):
return GraphicsContextPdf(self.file)
class GraphicsContextPdf(GraphicsContextBase):
def __init__(self, file):
GraphicsContextBase.__init__(self)
self._fillcolor = (0.0, 0.0, 0.0)
self._effective_alphas = (1.0, 1.0)
self.file = file
self.parent = None
def __repr__(self):
d = dict(self.__dict__)
del d['file']
del d['parent']
return repr(d)
def stroke(self):
"""
Predicate: does the path need to be stroked (its outline drawn)?
This tests for the various conditions that disable stroking
the path, in which case it would presumably be filled.
"""
# _linewidth > 0: in pdf a line of width 0 is drawn at minimum
# possible device width, but e.g., agg doesn't draw at all
return (self._linewidth > 0 and self._alpha > 0 and
(len(self._rgb) <= 3 or self._rgb[3] != 0.0))
def fill(self, *args):
"""
Predicate: does the path need to be filled?
An optional argument can be used to specify an alternative
_fillcolor, as needed by RendererPdf.draw_markers.
"""
if len(args):
_fillcolor = args[0]
else:
_fillcolor = self._fillcolor
return (self._hatch or
(_fillcolor is not None and
(len(_fillcolor) <= 3 or _fillcolor[3] != 0.0)))
def paint(self):
"""
Return the appropriate pdf operator to cause the path to be
stroked, filled, or both.
"""
return Op.paint_path(self.fill(), self.stroke())
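# Illustrative note (not from the original source): Op.paint_path combines the
# two predicates into a single PDF painting operator, roughly:
#   fill and stroke -> 'B', fill only -> 'f', stroke only -> 'S', neither -> 'n'.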
capstyles = {'butt': 0, 'round': 1, 'projecting': 2}
joinstyles = {'miter': 0, 'round': 1, 'bevel': 2}
def capstyle_cmd(self, style):
return [self.capstyles[style], Op.setlinecap]
def joinstyle_cmd(self, style):
return [self.joinstyles[style], Op.setlinejoin]
def linewidth_cmd(self, width):
return [width, Op.setlinewidth]
def dash_cmd(self, dashes):
offset, dash = dashes
if dash is None:
dash = []
offset = 0
return [list(dash), offset, Op.setdash]
def alpha_cmd(self, alpha, forced, effective_alphas):
name = self.file.alphaState(effective_alphas)
return [name, Op.setgstate]
def hatch_cmd(self, hatch):
if not hatch:
if self._fillcolor is not None:
return self.fillcolor_cmd(self._fillcolor)
else:
return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
else:
hatch_style = (self._hatch_color, self._fillcolor, hatch)
name = self.file.hatchPattern(hatch_style)
return [Name('Pattern'), Op.setcolorspace_nonstroke,
name, Op.setcolor_nonstroke]
def rgb_cmd(self, rgb):
if rcParams['pdf.inheritcolor']:
return []
if rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_stroke]
else:
return list(rgb[:3]) + [Op.setrgb_stroke]
def fillcolor_cmd(self, rgb):
if rgb is None or rcParams['pdf.inheritcolor']:
return []
elif rgb[0] == rgb[1] == rgb[2]:
return [rgb[0], Op.setgray_nonstroke]
else:
return list(rgb[:3]) + [Op.setrgb_nonstroke]
def push(self):
parent = GraphicsContextPdf(self.file)
parent.copy_properties(self)
parent.parent = self.parent
self.parent = parent
return [Op.gsave]
def pop(self):
assert self.parent is not None
self.copy_properties(self.parent)
self.parent = self.parent.parent
return [Op.grestore]
def clip_cmd(self, cliprect, clippath):
"""Set clip rectangle. Calls self.pop() and self.push()."""
cmds = []
# Pop graphics state until we hit the right one or the stack is empty
while ((self._cliprect, self._clippath) != (cliprect, clippath)
and self.parent is not None):
cmds.extend(self.pop())
# Unless we hit the right one, set the clip polygon
if ((self._cliprect, self._clippath) != (cliprect, clippath) or
self.parent is None):
cmds.extend(self.push())
if self._cliprect != cliprect:
cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
if self._clippath != clippath:
path, affine = clippath.get_transformed_path_and_affine()
cmds.extend(
PdfFile.pathOperations(path, affine, simplify=False) +
[Op.clip, Op.endpath])
return cmds
commands = (
# must come first since may pop
(('_cliprect', '_clippath'), clip_cmd),
(('_alpha', '_forced_alpha', '_effective_alphas'), alpha_cmd),
(('_capstyle',), capstyle_cmd),
(('_fillcolor',), fillcolor_cmd),
(('_joinstyle',), joinstyle_cmd),
(('_linewidth',), linewidth_cmd),
(('_dashes',), dash_cmd),
(('_rgb',), rgb_cmd),
(('_hatch',), hatch_cmd), # must come after fillcolor and rgb
)
# TODO: _linestyle
def delta(self, other):
"""
Copy properties of other into self and return PDF commands
needed to transform self into other.
"""
cmds = []
fill_performed = False
for params, cmd in self.commands:
different = False
for p in params:
ours = getattr(self, p)
theirs = getattr(other, p)
try:
if (ours is None or theirs is None):
different = bool(not(ours is theirs))
else:
different = bool(ours != theirs)
except ValueError:
ours = np.asarray(ours)
theirs = np.asarray(theirs)
different = (ours.shape != theirs.shape or
np.any(ours != theirs))
if different:
break
# Need to update hatching if we also updated fillcolor
if params == ('_hatch',) and fill_performed:
different = True
if different:
if params == ('_fillcolor',):
fill_performed = True
theirs = [getattr(other, p) for p in params]
cmds.extend(cmd(self, *theirs))
for p in params:
setattr(self, p, getattr(other, p))
return cmds
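# Illustrative example (hypothetical value): if `other` differs from `self`
# only in line width, delta(other) returns just [2.0, Op.setlinewidth] and
# copies _linewidth over, so unchanged graphics state is never re-emitted.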
def copy_properties(self, other):
"""
Copy properties of other into self.
"""
GraphicsContextBase.copy_properties(self, other)
fillcolor = getattr(other, '_fillcolor', self._fillcolor)
effective_alphas = getattr(other, '_effective_alphas',
self._effective_alphas)
self._fillcolor = fillcolor
self._effective_alphas = effective_alphas
def finalize(self):
"""
Make sure every pushed graphics state is popped.
"""
cmds = []
while self.parent is not None:
cmds.extend(self.pop())
return cmds
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPdf(figure)
manager = FigureManagerPdf(canvas, num)
return manager
class PdfPages(object):
"""
A multi-page PDF file.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> # Initialize:
>>> with PdfPages('foo.pdf') as pdf:
... # As many times as you like, create a figure fig and save it:
... fig = plt.figure()
... pdf.savefig(fig)
... # When no figure is specified the current figure is saved
... pdf.savefig()
Notes
-----
In reality :class:`PdfPages` is a thin wrapper around :class:`PdfFile`, in
order to avoid confusion when using :func:`~matplotlib.pyplot.savefig` and
forgetting the format argument.
"""
__slots__ = ('_file', 'keep_empty')
def __init__(self, filename, keep_empty=True):
"""
Create a new PdfPages object.
Parameters
----------
filename: str
Plots using :meth:`PdfPages.savefig` will be written to a file at
this location. The file is opened at once and any older file with
the same name is overwritten.
keep_empty: bool, optional
If set to False, then empty pdf files will be deleted automatically
when closed.
"""
self._file = PdfFile(filename)
self.keep_empty = keep_empty
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""
Finalize this object, making the underlying file a complete
PDF file.
"""
self._file.close()
if (self.get_pagecount() == 0 and not self.keep_empty and
not self._file.passed_in_file_object):
os.remove(self._file.fh.name)
self._file = None
def infodict(self):
"""
Return a modifiable information dictionary object
(see PDF reference section 10.2.1 'Document Information
Dictionary').
"""
return self._file.infoDict
def savefig(self, figure=None, **kwargs):
"""
Saves a :class:`~matplotlib.figure.Figure` to this file as a new page.
Any other keyword arguments are passed to
:meth:`~matplotlib.figure.Figure.savefig`.
Parameters
----------
figure: :class:`~matplotlib.figure.Figure` or int, optional
Specifies what figure is saved to file. If not specified, the
active figure is saved. If a :class:`~matplotlib.figure.Figure`
instance is provided, this figure is saved. If an int is specified,
the figure instance to save is looked up by number.
"""
if isinstance(figure, Figure):
figure.savefig(self, format='pdf', **kwargs)
else:
if figure is None:
figureManager = Gcf.get_active()
else:
figureManager = Gcf.get_fig_manager(figure)
if figureManager is None:
raise ValueError("No such figure: " + repr(figure))
else:
figureManager.canvas.figure.savefig(self, format='pdf',
**kwargs)
def get_pagecount(self):
"""
Returns the current number of pages in the multipage pdf file.
"""
return len(self._file.pageList)
def attach_note(self, text, positionRect=[-100, -100, 0, 0]):
"""
Add a new text note to the page to be saved next. The optional
positionRect specifies the position of the new note on the
        page. It is outside the page by default to make sure it is
invisible on printouts.
"""
self._file.newTextnote(text, positionRect)
class FigureCanvasPdf(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
fixed_dpi = 72
def draw(self):
pass
filetypes = {'pdf': 'Portable Document Format'}
def get_default_filetype(self):
return 'pdf'
def print_pdf(self, filename, **kwargs):
image_dpi = kwargs.get('dpi', 72) # dpi to use for images
self.figure.set_dpi(72) # there are 72 pdf points to an inch
width, height = self.figure.get_size_inches()
if isinstance(filename, PdfPages):
file = filename._file
else:
file = PdfFile(filename)
try:
file.newPage(width, height)
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(
self.figure, width, height, image_dpi,
RendererPdf(file, image_dpi, height, width),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
renderer.finalize()
finally:
if isinstance(filename, PdfPages): # finish off this page
file.endStream()
else: # we opened the file above; now finish it off
file.close()
class FigureManagerPdf(FigureManagerBase):
pass
FigureCanvas = FigureCanvasPdf
FigureManager = FigureManagerPdf
| gpl-3.0 |
willyrv/IICREstimator | estimIICR.py | 1 | 17079 | #!/usr/bin/python
import os
import numpy as np
import matplotlib.pyplot as plt
import json
import re
from scipy.special import comb
import argparse
def generate_MS_tk(ms_command):
# Simulate T2 values using MS.
# The input is a string containing the MS-command
# The output is a list of float containing independent values of Tk
# where Tk is the first coalescent event of the sample
o = os.popen(ms_command).read()
newick_re = "\([(0-9.,:)]+\)" # Find the tree line
newick_pattern = re.compile(newick_re)
single_coal_re = "\([0-9.,:]+\)"
single_coal_pattern = re.compile(single_coal_re)
t_obs = []
for newick_line in newick_pattern.finditer(o):
newick_text = newick_line.group()
coal_times = []
for single_coal_event in single_coal_pattern.finditer(newick_text):
matched_text = single_coal_event.group()
coal_time = float(matched_text.split(':')[1].split(',')[0])
coal_times.append(coal_time)
t_obs.append(min(coal_times))
return t_obs
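# Illustrative usage (assumes an `ms` binary on the PATH; the command below is
# made up, not taken from this repository):
#   t_obs = generate_MS_tk('./ms 4 1000 -T')
# Every newick tree printed by ms (flag -T) contributes one Tk value: the
# smallest branch-point time found in that tree.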
def generate_MS_t2(ms_command):
# Simulate T2 values using MS.
# The input is a string containing the MS-command
# The output is a list of float containing independent values of T2
o = os.popen(ms_command).read()
o = o.split('\n')
t_obs = []
for l in o:
if l[:6] == 'time:\t':
temp = l.split('\t')
t_obs.append(float(temp[1]))
return t_obs
def compute_real_history_from_ms_command(ms_command, N0):
# Returns a function depending on the scenario found in the ms_command
# First we compute the value of N0
msc = ms_command.split(' ')
# Case of instantaneous changes
if ms_command.__contains__('-eN'):
size_changes = ms_command.split(' -eN ')
(t_k, alpha_k) = ([i.split(' ')[0] for i in size_changes[1:]],
[j.split(' ')[1] for j in size_changes[1:]])
t_k = [0]+[4*N0*float(t) for t in t_k]
N_k = [N0]+[N0*float(alpha) for alpha in alpha_k]
return ('-eN', t_k, N_k)
# print 'case 1'
# Case of exponential growth
elif ms_command.__contains__('G'):
alpha = float(msc[msc.index('-G') + 1])
T = float(msc[msc.index('-G') + 3])
return ('ExponGrow', [alpha, T, N0])
# print 'exponential growth'
# StSI case
elif ms_command.__contains__('-I'):
n = int(msc[msc.index('-I') + 1])
M = float(msc[msc.index('-I') + n+2])
if msc[msc.index('-I') + 2] == '2':
return ('StSI same_island', [n, M, N0])
else:
            return ('StSI distinct_island', [n, M, N0])
else:
        return ('-eN', [0], [N0])
def compute_empirical_dist(obs, x_vector=''):
# This method computes the empirical distribution given the
# observations.
# The functions are evaluated in the x_vector parameter
# by default x_vector is computed as a function of the data
# by default the differences 'dx' are a vector
if len(x_vector) == 0:
actual_x_vector = np.arange(0, max(obs)+0.1, 0.1)
    elif x_vector[-1] <= max(obs):  # extend the vector to cover all the data
        actual_x_vector = list(x_vector)
        actual_x_vector.append(max(obs))
        actual_x_vector = np.array(actual_x_vector)
else:
actual_x_vector = np.array(x_vector)
actual_x_vector[0] = 0 # The first element of actual_x_vector should be 0
half_dx = np.true_divide(actual_x_vector[1:]-actual_x_vector[:-1], 2)
# Computes the cumulative distribution and the distribution
x_vector_shift = actual_x_vector[:-1] + half_dx
x_vector_shift = np.array([0] + list(x_vector_shift) +
[actual_x_vector[-1]+half_dx[-1]])
counts = np.histogram(obs, bins = actual_x_vector)[0]
counts_shift = np.histogram(obs, bins = x_vector_shift)[0]
cdf_x = counts.cumsum()
cdf_x = np.array([0]+list(cdf_x))
# now we compute the pdf (the derivative of the cdf)
dy_shift = counts_shift
dx_shift = x_vector_shift[1:] - x_vector_shift[:-1]
pdf_obs_x = np.true_divide(dy_shift, dx_shift)
return (cdf_x, pdf_obs_x)
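# Illustrative usage (hypothetical grid): for simulated coalescence times `obs`,
#   cdf_x, pdf_x = compute_empirical_dist(obs, np.linspace(0, 10, 101))
# cdf_x holds the cumulative counts evaluated at the grid points and pdf_x the
# finite-difference density computed on the shifted mid-point bins.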
def compute_t_vector(start, end, number_of_values, vector_type):
if vector_type == 'linear':
x_vector = np.linspace(start, end, number_of_values)
elif vector_type == 'log':
n = number_of_values
x_vector = [0.1*(np.exp(i * np.log(1+10*end)/n)-1)
for i in range(n+1)]
x_vector[0] = x_vector[0]+start
else:
# For the moment, the default output is a linspace distribution
x_vector = np.linspace(start, end, number_of_values)
return np.array(x_vector)
def group_t(time_interval, pattern):
    # Groups the time intervals following the pattern specified in the psmc
    # documentation
constant_blocks = pattern.split('+')
t = list(time_interval)
t = t[:]+t[-1:]
temp = [t[0]]
current_pos = 0
for b in constant_blocks:
if b.__contains__('*'):
n_of_blocks = int(b.split('*')[0])
size_of_blocks = int(b.split('*')[1])
for i in range(n_of_blocks):
temp.append(t[current_pos+size_of_blocks])
current_pos+=size_of_blocks
else:
size_of_blocks = int(b)
temp.append(t[current_pos+size_of_blocks])
current_pos+=size_of_blocks
return np.array(temp)
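# Illustrative example (made-up numbers): with the psmc-style pattern "4+5*3"
# and 20 atomic time intervals,
#   group_t(np.linspace(0, 1, 20), "4+5*3")
# keeps t[0], the boundary after the first 4 atomic intervals, and then one
# boundary after each of the 5 following blocks of 3 intervals.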
def is_array_like(obj, string_is_array = False, tuple_is_array = True):
result = hasattr(obj, "__len__") and hasattr(obj, '__getitem__')
if result and not string_is_array and isinstance(obj, str):
result = False
if result and not tuple_is_array and isinstance(obj, tuple):
result = False
return result
def compute_IICR_n_islands(t, params):
n = params["n"]
M = params["M"]
s = params["sampling_same_island"]
if(is_array_like(n)):
raise TypeError("Having multiple number of islands is not yet supported!")
if(is_array_like(M)):
tau = params["tau"]
c = params["size"]
if(not (is_array_like(tau) or is_array_like(c))):
raise TypeError("Both 'tau' and 'size' must be array types!")
if(len(M) != len(tau)):
raise ValueError("Vectors 'M' and 'tau' must have the same length!")
if(tau[0] != 0):
raise ValueError("The time of the first event must be 0!")
if(len(M) != len(c)):
raise ValueError("Vectors 'M' and 'size' must have the same length!")
return compute_piecewise_stationary_IICR_n_islands(n, M, tau, c, t, s)
return compute_stationary_IICR_n_islands(n, M, t, s)
def compute_stationary_IICR_n_islands(n, M, t, s=True):
# This method evaluates the lambda function in a vector
# of time values t.
# If 's' is True we are in the case when two individuals where
# sampled from the same island. If 's' is false, then the two
# individuals where sampled from different islands.
# Computing constants
gamma = np.true_divide(M, n-1)
delta = (1+n*gamma)**2 - 4*gamma
alpha = 0.5*(1+n*gamma + np.sqrt(delta))
beta = 0.5*(1+n*gamma - np.sqrt(delta))
# Now we evaluate
x_vector = t
if s:
numerator = (1-beta)*np.exp(-alpha*x_vector) + (alpha-1)*np.exp(-beta*x_vector)
denominator = (alpha-gamma)*np.exp(-alpha*x_vector) + (gamma-beta)*np.exp(-beta*x_vector)
else:
numerator = beta*np.exp(-alpha*(x_vector)) - alpha*np.exp(-beta*(x_vector))
denominator = gamma * (np.exp(-alpha*(x_vector)) - np.exp(-beta*(x_vector)))
lambda_t = np.true_divide(numerator, denominator)
return lambda_t
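# Illustrative usage (hypothetical parameters): IICR of a symmetric 10-island
# model with migration rate M=1, both lineages sampled in the same island:
#   lambda_t = compute_stationary_IICR_n_islands(10, 1.0,
#                                                np.linspace(0.01, 10, 100))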
def compute_piecewise_stationary_IICR_n_islands(n, M, tau, c, t, s=True):
from model import Pnisland
sampling = []
if(s):
sampling = [2] + [0] * (n - 1)
else:
sampling = [1, 1] + [0] * (n - 2)
scenarios = []
for i in range(len(M)):
thisdict = {"time" : tau[i], "n": n, "M": M[i], "c": c[i]}
scenarios.append(thisdict)
model_params = {"nbLoci" : 100, "samplingVector" : sampling, "scenario" : scenarios}
nsnic = Pnisland(model_params)
return nsnic.evaluateIICR(t)
def get_PSMC_IICR(filename):
a = open(filename, 'r')
result = a.read()
a.close()
# getting the time windows and the lambda values
last_block = result.split('//\n')[-2]
last_block = last_block.split('\n')
time_windows = []
estimated_lambdas = []
for line in last_block:
if line[:2]=='RS':
time_windows.append(float(line.split('\t')[2]))
estimated_lambdas.append(float(line.split('\t')[3]))
# getting the estimations of theta and N0
    result = result.split('PA\t')  # The 'PA' line contains the estimated parameters; theta is read from it below
result = result[-1].split('\n')[0]
result = result.split(' ')
theta = float(result[1])
return(time_windows, estimated_lambdas, theta)
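# Illustrative usage (file name, mu, binsize and generation_time are assumptions):
#   t_win, lambdas, theta = get_PSMC_IICR("results/sample.psmc")
#   N0 = theta / (4.0 * mu * binsize)   # same rescaling as used for real data below
#   years = np.array(t_win) * 2.0 * N0 * generation_time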
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Simulate T2 values with ms then plot the IICR')
parser.add_argument('params_file', type=str,
help='the filename of the parameters')
args = parser.parse_args()
with open(args.params_file) as json_params:
p = json.load(json_params)
times_vector = []
if p["custom_x_vector"]["set_custom_xvector"] == 0:
start = p["computation_parameters"]["start"]
end = p["computation_parameters"]["end"]
number_of_values = p["computation_parameters"]["number_of_values"]
vector_type = p["computation_parameters"]["x_vector_type"]
t_vector = compute_t_vector(start, end, number_of_values, vector_type)
pattern = p["computation_parameters"]["pattern"]
times_vector = group_t(t_vector, pattern)
else:
times_vector = np.array(p["custom_x_vector"]["x_vector"])
empirical_densities = []
empirical_histories = []
# Do n independent simulations
for i in range(len(p["scenarios"])):
ms_full_cmd = os.path.join(p["path2ms"], p["scenarios"][i]["ms_command"])
obs = generate_MS_tk(ms_full_cmd)
obs = 2*np.array(obs) # Given that in ms time is scaled to 4N0 and
# our model scales times to 2N0, we multiply the output of MS by 2.
(F_x, f_x) = compute_empirical_dist(obs, times_vector)
empirical_densities.append(np.true_divide(np.array(f_x), sum(np.array(f_x))))
F_x = np.array(F_x)
x = times_vector
# If the sample size on the ms command is greater than 2
# the IICR that we obtain when the sample size is 2
# must be multiplied by a factor
# Parsing the ms command for getting the sample size
ms_command = p["scenarios"][i]["ms_command"]
sample_size = int(ms_command.split("ms ")[1].split(" ")[0])
factor = comb(sample_size, 2)
empirical_lambda = factor * np.true_divide(len(obs)-F_x, f_x)
empirical_histories.append((x, empirical_lambda))
# Do the plot
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
N0 = p["scale_params"]["N0"]
g_time = p["scale_params"]["generation_time"]
if "use_real_data" in p:
for d in p["use_real_data"]:
(t_real_data, IICR_real_data, theta) = get_PSMC_IICR(d["psmc_results_file"])
thisN0 = theta / (4.0 * d["mu"] * d["binsize"])
t_real_data = np.array(t_real_data) * 2.0 * thisN0 * g_time
IICR_real_data = np.array(IICR_real_data) * thisN0
plot_label = d["label"]
linecolor = d["color"]
line_style = d["linestyle"]
linewidth = d["linewidth"]
alpha = d["alpha"]
ax.plot(t_real_data, IICR_real_data, color = linecolor,
ls=line_style, linewidth=linewidth, drawstyle='steps-post', alpha=alpha, label=plot_label)
for i in range(len(empirical_histories)):
(x, empirical_lambda) = empirical_histories[i]
# Avoiding to have x[0]=0 in a logscale
if x[0] == 0:
x[0] = float(x[1])/100
linecolor = p["scenarios"][i]["color"]
line_style = p["scenarios"][i]["linestyle"]
linewidth = p["scenarios"][i]["linewidth"]
alpha = p["scenarios"][i]["alpha"]
plot_label = p["scenarios"][i]["label"]
ax.plot(2 * N0 * g_time*x, N0 * empirical_lambda, color = linecolor,
ls=line_style, linewidth=linewidth, drawstyle='steps-post', alpha=alpha, label=plot_label)
# Save IICR functions to a file (one line for times and one line for IICR values)
if "save_IICR_as_file" in p:
if p["save_IICR_as_file"]:
for i in range(len(empirical_histories)):
(x, empirical_lambda) = empirical_histories[i]
with open("./IICR_{}_text_file.txt".format(i), "w") as f:
x2write = [str(2 * N0 * g_time * value) for value in x]
IICR2write = [str(N0 * value) for value in empirical_lambda]
f.write("{}\n".format(" ".join(x2write)))
f.write("{}\n".format(" ".join(IICR2write)))
# Draw the vertical lines (if specified)
for vl in p["vertical_lines"]:
ax.axvline(4 * N0 * g_time * vl, color='k', ls='--')
# Plot the real history (if commanded)
if p["plot_params"]["plot_real_ms_history"]:
        # Assumes a piecewise-constant ('-eN') scenario, which returns (case, t_k, N_k)
        [case, x, y] = compute_real_history_from_ms_command(
            p["scenarios"][0]["ms_command"], N0)
print(case)
print(x)
print(y)
        x[0] = min(float(x[1])/5, p["plot_params"]["plot_limits"][2]) # this is for avoiding
# to have x[0]=0 in a logscale
x.append(1e7) # adding the last value
y.append(y[-1])
ax.step(x, y, '-b', where='post', label='Real history')
if p["plot_params"]["plot_theor_IICR"]:
theoretical_IICR_list = []
T_max = np.log10(p["plot_params"]["plot_limits"][1])
t_k = np.logspace(1, T_max, 1000)
t_k = np.true_divide(t_k, 2 * N0 * g_time)
for i in range(len(p["theoretical_IICR_nisland"])):
params = p["theoretical_IICR_nisland"][i]
theoretical_IICR_list.append(compute_IICR_n_islands(t_k, params))
# Plotting the theoretical IICR
for i in range(len(p["theoretical_IICR_nisland"])):
linecolor = p["theoretical_IICR_nisland"][i]["color"]
line_style = p["theoretical_IICR_nisland"][i]["linestyle"]
linewidth = p["theoretical_IICR_nisland"][i]["linewidth"]
alpha = p["theoretical_IICR_nisland"][i]["alpha"]
plot_label = p["theoretical_IICR_nisland"][i]["label"]
ax.plot(2 * N0 * g_time * t_k, N0 * theoretical_IICR_list[i],
color=linecolor, ls=line_style, alpha=alpha, label=plot_label)
# Plotting constant piecewise functions (if any)
if "piecewise_constant_functions" in p:
for f in p["piecewise_constant_functions"]:
x = f["x"]
y = f["y"]
plot_label = f["label"]
linecolor = f["color"]
line_style = f["linestyle"]
line_width = f["linewidth"]
line_alpha = f["alpha"]
ax.step(x, y, where='post', color=linecolor, ls=line_style, linewidth=line_width,
alpha=line_alpha, label=plot_label)
ax.set_xlabel(p["plot_params"]["plot_xlabel"])
ax.set_ylabel(p["plot_params"]["plot_ylabel"])
if "y_scale" in p["plot_params"]:
if p["plot_params"]["y_scale"] == "log":
ax.set_yscale('log')
ax.set_xscale('log')
plt.legend(loc='best')
[x_a, x_b, y_a, y_b] = p["plot_params"]["plot_limits"]
plt.xlim(x_a, x_b)
plt.ylim(y_a, y_b)
if "plot_title" in p["plot_params"]:
ax.set_title(p["plot_params"]["plot_title"])
if ("save_figure" in p["plot_params"]) and p["plot_params"]["save_figure"]:
fig_name = os.path.splitext(args.params_file)[0]
plt.savefig("{}.pdf".format(fig_name),
format="pdf")
if ("show_plot" in p["plot_params"]) and p["plot_params"]["show_plot"]:
plt.show()
# Plotting the densities
if "plot_densities" in p:
if len(p["plot_densities"]["densities_to_plot"])>0:
fig = plt.figure()
ax = fig.add_subplot(111)
for i in p["plot_densities"]["densities_to_plot"]:
l = p["scenarios"][i]["label"]
c = p["scenarios"][i]["color"]
s = p["scenarios"][i]["linestyle"]
a = p["scenarios"][i]["alpha"]
ax.step(times_vector, empirical_densities[i], color=c, ls=s,
alpha = a, label = l)
plt.title("Density of T2")
plt.xlim(p["plot_densities"]["x_lim"][0], p["plot_densities"]["x_lim"][1])
plt.ylim(p["plot_densities"]["y_lim"][0], p["plot_densities"]["y_lim"][1])
plt.legend(loc='best')
plt.show() | mit |
mmottahedi/neuralnilm_prototype | scripts/e398.py | 4 | 23130 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e370
longer seq
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.9,
one_target_per_seq=False,
n_seq_per_batch=64,
subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs=True,
standardise_input=True,
unit_variance_targets=True,
input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-5,
learning_rate_changes_by_iteration={
1000: 1e-6,
2000: 1e-7
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
def exp_a(name):
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': True, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': True, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_b(name):
"""
tanh first layer, all others are rectify
"""
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': True, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': True, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_c(name):
"""
tanh all the way through. Identity init of RNNs
"""
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': True, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'W_hid_to_hid': Identity(scale=0.5),
'learn_init': True, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_d(name):
"""
e380 (tanh all the way though, default inits) with batch norm
"""
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'learn_init': True, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'learn_init': True, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_e(name):
"""
e380 again
"""
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 2,
'stride': 1,
'nonlinearity': tanh,
'border_mode': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh,
'learn_init': True, 'precompute_input': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh,
'learn_init': True, 'precompute_input': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_f(name):
# two dense layers at start, batch norm
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': identity,
'W': Normal(std=1),
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': tanh,
            'axes': (1,)
},
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': tanh,
            'axes': (1,)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'learn_init': True, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': identity,
'b': None
},
{
'type': BatchNormLayer,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': identity,
'b': None,
'learn_init': True, 'precompute_input': False
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_g(name):
# two dense layers at start, no batch norm
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': tanh,
'W': Normal(std=1)
},
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': tanh
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh,
'learn_init': True, 'precompute_input': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': tanh,
'learn_init': True, 'precompute_input': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_h(name):
# replace tanh with sigmoid
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': sigmoid,
'W': Normal(std=1)
},
{
'type': DenseLayer,
'num_units': 40,
'nonlinearity': sigmoid
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': sigmoid,
'learn_init': True, 'precompute_input': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': sigmoid,
'learn_init': True, 'precompute_input': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def exp_i(name):
# two dense layers at start, no batch norm
# no gradient step, do precompute input, small net
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config']= [
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'nonlinearity': tanh,
'learn_init': True,
'precompute_input': True
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 40,
'filter_length': 4,
'stride': 4,
'nonlinearity': tanh
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'nonlinearity': tanh,
'learn_init': True,
'precompute_input': True
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('abcdefghi')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=5000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
NelisVerhoef/scikit-learn | sklearn/neighbors/base.py | 71 | 31147 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
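# Illustrative sketch (editor's addition, not part of the scikit-learn module):
# the CSR layout that kneighbors_graph builds for mode='connectivity'. Because
# every row has exactly n_neighbors nonzeros, indptr is a plain arithmetic
# progression. The neighbor indices below are hand-picked toy values.
def _demo_kneighbors_graph_layout():
    n_samples, n_neighbors = 3, 2
    A_ind = np.array([[1, 2], [0, 2], [0, 1]])        # neighbor indices per row
    A_data = np.ones(n_samples * n_neighbors)         # connectivity weights
    A_indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
    graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
                       shape=(n_samples, n_samples))
    # graph.toarray() ->
    # [[0., 1., 1.],
    #  [1., 0., 1.],
    #  [1., 1., 0.]]
    return graph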
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
IntersectTechnologies/inetbfa-data-conversion | dodo.py | 1 | 9308 | from os import path, listdir, makedirs
import re, codecs
import datetime as dt
import calendar as cal
import pandas as pd
import string
import numpy as np
from datamanager.envs import *
from datamanager.load import *
from datamanager.adjust import calc_adj_close
from datamanager.utils import last_month_end
import datamanager.transforms as transf
fields = marketdata_fields()
# paths
mergein_old = MASTER_DATA_PATH
mergein_new = CONVERT_PATH
index_src_path = path.join(DL_PATH, 'Indices.xlsx')
closepath = path.join(MERGED_PATH, "Close.csv")
divpath = path.join(MERGED_PATH, "Dividend Ex Date.csv")
bookvaluepath = path.join(MERGED_PATH, "Book Value per Share.csv")
def get_all_equities():
new_all, _, _, _ = get_all_equities_from_data(MERGED_PATH, CONVERT_PATH, 'Close')
return new_all
def get_current_listed():
new_all, current, newly_listed, delisted = get_all_equities_from_data(MERGED_PATH, CONVERT_PATH, 'Close')
    return current
def convert_data(task):
'''
'''
name = task.name.split(':')[1]
fp = path.join(DL_PATH, name + '.xlsx')
new_data = load_inetbfa_ts_data(fp)
# drop all data for current month
dropix = new_data.index[new_data.index.values.astype('datetime64[D]') > np.datetime64(last_month_end())]
new_data.drop(dropix).sort_index(axis = 1).to_csv(task.targets[0])
def convert_indices(task):
new_data = load_inetbfa_ts_data(index_src_path)
dropix = new_data.index[new_data.index.values.astype('datetime64[D]') > np.datetime64(last_month_end())]
new_data.drop(dropix).sort_index(axis = 1).to_csv(task.targets[0])
def merge_index(task):
new = load_ts(path.join(CONVERT_PATH, 'Indices.csv'))
old = load_ts(path.join(MERGED_PATH, 'Indices.csv'))
merged = empty_dataframe(old.columns, enddate = last_month_end())
merged.update(old)
merged.update(new)
merged.sort_index(axis = 1).to_csv(task.targets[0])
def merge_data(task):
name = task.name.split(':')[1]
new = load_ts(path.join(CONVERT_PATH, name + '.csv'))
old = load_ts(path.join(MERGED_PATH, name + '.csv'))
merged = empty_dataframe(get_all_equities(), enddate = last_month_end())
merged.update(old)
merged.update(new)
merged.sort_index(axis = 1).to_csv(task.targets[0])
def calc_adjusted_close(dependencies, targets):
all_equities = get_all_equities()
# Import closing price data
close = load_field_ts(MERGED_PATH, field = "Close")
# Import dividend ex date data
divs = load_field_ts(MERGED_PATH, field = "Dividend Ex Date")
adj_close = calc_adj_close(close, divs, all_equities, enddate = last_month_end())
adj_close.sort_index(axis = 1).to_csv(targets[0])
def booktomarket(dependencies, targets):
# Import closing price data
close = load_field_ts(MERGED_PATH, field = "Close")
# Import book value per share data
bookvalue = load_field_ts(MERGED_PATH, field = "Book Value per Share")
b2m = transf.calc_booktomarket(close, bookvalue)
b2m.sort_index(axis = 1).to_csv(targets[0])
def resample_monthly(task):
    name = task.name.split(':')[1]
    data = load_field_ts(MASTER_DATA_PATH, field = name)
    # map each field to the aggregation used when resampling to month end
    how_by_field = {
        'Close': 'last',
        'Adjusted Close': 'last',
        'Open': 'first',
        'High': 'max',
        'Low': 'min',
        'DY': 'last',
        'EY': 'last',
        'PE': 'last',
        'Book-to-Market': 'last',
        'Volume': 'sum',
        'Total Number Of Shares': 'last',
        'Number Of Trades': 'sum',
        'Market Cap': 'last',
    }
    if name in how_by_field:
        out = transf.resample_monthly(data, how = how_by_field[name])
        out.sort_index(axis = 1).to_csv(path.join(MASTER_DATA_PATH, name + '-monthly.csv'))
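# Editor's sketch (assumption): transf.resample_monthly is not shown in this
# file, but the month-end aggregations chosen above correspond to the
# following plain pandas operations on a DataFrame indexed by trading date.
def _demo_resample_monthly(daily):
    month_end_levels = daily.resample('M').last()   # prices, ratios, market cap
    monthly_totals = daily.resample('M').sum()      # volume, number of trades
    return month_end_levels, monthly_totals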
def monthly_avg_momentum(task):
# load the daily close
close = load_field_ts(MASTER_DATA_PATH, field = "Close")
# resample to monthly average data
close_m = transf.resample_monthly(close, how = 'mean')
# calculate the momentum
mom = transf.momentum_monthly(close_m, 12, 1)
mom.sort_index(axis = 1).to_csv(path.join(MASTER_DATA_PATH, "Monthly-Avg-Momentum.csv"))
def monthly_close_momentum(task):
# load the daily close
close = load_field_ts(MASTER_DATA_PATH, field = "Close")
# resample to monthly close data
close_m = transf.resample_monthly(close, how = 'last')
# calculate the momentum
mom = transf.momentum_monthly(close_m, 12, 1)
mom.sort_index(axis = 1).to_csv(path.join(MASTER_DATA_PATH, "Monthly-Close-Momentum.csv"))
def calc_log_returns(task):
close = load_field_ts(MASTER_DATA_PATH, field = "Close")
logret = transf.log_returns(close)
logret.sort_index(axis = 1).to_csv(path.join(MASTER_DATA_PATH, "Log-Returns.csv"))
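# Editor's sketch (assumption): transf.log_returns presumably computes the
# standard log return log(P_t / P_{t-1}); a plain pandas/numpy equivalent is:
def _demo_log_returns(close):
    return np.log(close / close.shift(1))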
def calc_pead_momentum(task):
# load close
close = load_field_ts(MASTER_DATA_PATH, field = "Close")
# load dividend decl date
announcements = load_field_ts(MASTER_DATA_PATH, field = "Dividend Declaration Date")
pead = transf.pead_momentum(announcements, close)
pead.sort_index(axis = 1).to_csv(path.join(MASTER_DATA_PATH, "Normalized-PEAD-Momentum.csv"))
def swapaxes(dependencies, targets):
temp = {}
for d in dependencies:
field = d.split("\\")[-1].split(".")[0]
temp[field] = pd.read_csv(d, sep = ',', index_col = 0, parse_dates=True)
# now create panel
panel = pd.Panel(temp)
out = panel.swapaxes(0, 2)
for ticker in out.items:
out[ticker].dropna(how='all').sort_index(axis = 1).to_csv(path.join(CONVERT_PATH, "tickers", ticker + '.csv'), index_label = "Date")
##########################################################################################
# DOIT tasks
##########################################################################################
# 1
def task_convert():
for f in fields:
yield {
'name':f,
'actions':[convert_data],
'targets':[path.join(CONVERT_PATH, f+ '.csv')],
'file_dep':[path.join(DL_PATH, f + '.xlsx')],
}
def task_convert_index():
return {
'actions':[convert_indices],
'file_dep': [path.join(DL_PATH, 'Indices.xlsx')],
'targets':[path.join(CONVERT_PATH, "Indices.csv")]
}
def task_merge_index():
return {
'actions':[merge_index],
'targets':[path.join(MERGED_PATH, "Indices.csv")],
'file_dep':[path.join(CONVERT_PATH, "Indices.csv")]
}
# 2
def task_merge():
for f in fields:
yield {
'name':f,
'actions':[merge_data],
'targets':[path.join(MERGED_PATH, f + '.csv')],
'file_dep':[path.join(mergein_new, f + '.csv'), path.join(mergein_old, f + '.csv')]
}
# 3
def task_adjusted_close():
return {
'actions':[calc_adjusted_close],
'file_dep': [closepath,
divpath],
'targets':[path.join(MERGED_PATH, "Adjusted Close.csv")]
}
# 5
def task_book2market():
return {
'actions':[booktomarket],
'file_dep': [closepath, bookvaluepath],
'targets':[path.join(MERGED_PATH, "Book-to-Market.csv")]
}
# 6
def task_data_per_ticker():
files = [path.join(CONVERT_PATH, f + '.csv') for f in fields]
return {
'actions':[swapaxes],
'file_dep': files,
'targets':[path.join(CONVERT_PATH, "tickers")]
}
def task_resample_monthly():
expanded = fields + ['Book-to-Market', 'Adjusted Close']
    for f in expanded:
yield {
'name':f,
'actions':[resample_monthly],
'targets':[path.join(MASTER_DATA_PATH, f + '-monthly.csv')],
'file_dep':[path.join(MASTER_DATA_PATH, f + '.csv')],
}
def task_monthly_close_momentum():
return {
'actions':[monthly_close_momentum],
'file_dep':[path.join(MASTER_DATA_PATH, 'Close.csv')],
}
def task_monthly_avg_momentum():
return {
'actions':[monthly_avg_momentum],
'file_dep':[path.join(MASTER_DATA_PATH, 'Close.csv')],
'targets':[path.join(MASTER_DATA_PATH, "Monthly-Avg-Momentum.csv")]
}
def task_log_returns():
return {
'actions':[calc_log_returns],
'file_dep':[path.join(MASTER_DATA_PATH, 'Close.csv')],
}
def task_pead_momentum():
return {
'actions':[calc_pead_momentum],
'file_dep':[path.join(MASTER_DATA_PATH, 'Close.csv'), path.join(MASTER_DATA_PATH, 'Dividend Declaration Date.csv')],
'targets':[path.join(MASTER_DATA_PATH, "Normalized-PEAD-Momentum.csv")]
} | apache-2.0 |
rajanil/mkboost | src/plot_visualize_kmers_collapsed.py | 1 | 8552 | import numpy as np
import cPickle
import matplotlib.pyplot as plot
from matplotlib.colors import colorConverter as convert
import matplotlib.cm as colormap
import pdb
import sys
def compile_hit_matrix(sequences, kmer_list, m):
"""this function compiles a matrix representation of where
along a collection of sequences, the list of kmers are found,
up to a given mismatch
Arguments
sequences : list
List of tuples containing virus protein sequence and
virus host class
kmer_list : list
List of lists of kmers, over different CV folds,
selected by Adaboost
m : int
Allowed mismatch
Returns
hitmatrix : int array
N_seq x col_size array where N_seq is total
number of viruses and col_size is the resolution along
the virus sequence.
.. note::
* In generating this hit matrix, the virus sequences are NOT aligned using any alignment tool. Instead, the sequences are simply normalized to unit length. Thus, location along sequence actually indicates fraction of whole sequence length.
* This visualization does not distinguish between individual k-mers, thus indicating selected protein regions rather than selected k-mers.
"""
col_size = 300
N_sequences = len(sequences)
    hit_matrix = np.zeros((N_sequences,col_size+1),dtype='float')
kmer_length = len(kmer_list[0][0])
for index, seq in enumerate(sequences):
# first column stores the virus class
hit_matrix[index,0] = seq[1]
sequence = seq[0]
sequence_length = len(sequence)
for c in xrange(sequence_length-kmer_length+1):
for fold, kmers in enumerate(kmer_list):
for kmer in kmers:
mismatch = (np.array(list(sequence[c:c+kmer_length]))!=np.array(list(kmer))).sum()
if mismatch<=m:
left_col = int(c * float(col_size) / (sequence_length-kmer_length+1)) + 1
                        right_col = min([left_col+2,col_size+1])
hit_matrix[index,left_col:right_col] += 1
# normalize by max number of hits
hit_matrix[:,1:] = hit_matrix[:,1:]/hit_matrix[:,1:].max()
return hit_matrix
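# Illustrative sketch (editor's addition): a toy call with one 10-residue
# sequence and a single selected 3-mer, allowing no mismatches. The k-mer
# 'KVL' occurs twice, so the two corresponding column bands are incremented,
# while column 0 holds the host class label (2 in this example).
def _demo_compile_hit_matrix():
    toy_sequences = [('MKVLAAKVLA', 2)]
    toy_kmer_list = [['KVL']]      # one CV fold with one selected k-mer
    return compile_hit_matrix(toy_sequences, toy_kmer_list, 0)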
def plot_hit_matrix(hit_matrix, k, m, kmers):
"""this function visualizes the kmers along protein sequences,
represented as a matrix, using `imshow`.
Arguments
hit_matrix : int array
`N_seq` x `col_size` array where `N_seq` is total
number of viruses and `col_size` is the resolution along
the virus sequence.
k : int
size of k-mers
m : int
allowed mismatch
kmers : list
list of list of k-mers, over different CV folds,
selected by Adaboost
Returns
figure : matplotlib figure object
"""
# set background color, text color and font sizes
text_color = 'k'
bg_color = 'w'
axis_label_fontsize = 7
axis_tick_fontsize = 6
title_fontsize = 8
legend_fontsize = 6
class_labels = np.unique(hit_matrix[:,0]).astype(int)
num_classes = class_labels.size
(num_proteins,num_cols) = hit_matrix.shape
# introduce additional rows in the visualization matrix
# that separate between host classes
num_proteins = num_proteins + num_classes - 1
num_cols = num_cols-1
data = np.zeros((num_proteins,num_cols,3),dtype=float)
for label in class_labels:
hit_idx = (hit_matrix[:,0]==label).nonzero()[0]
data_idx = hit_idx + ( label - 1 )
data[data_idx,:,0] = hit_matrix[hit_idx,1:]
# indicated selected regions using a pure color
# red is used in this script
# in grayscale mode, this gets plotted as an intensity
data[data_idx,:,:] = data[data_idx,:,:] * np.array(list(convert.to_rgb('red'))).reshape(1,1,3)
try:
data[data_idx.max()+1,:,:] = 0.1
except IndexError:
continue
# set figure size and resolution
DPI = 500
fig_resolution = (1706, 1280)
fig_size = tuple([res/float(DPI) for res in fig_resolution])
figure = plot.figure(figsize=fig_size, facecolor=bg_color, edgecolor=bg_color)
subplot = figure.add_subplot(111)
subplot.set_position([0.03,0.04,0.95,0.87])
subplot.imshow(1.-hit_matrix[:,1:], cmap=colormap.gray, aspect='auto', interpolation='nearest')
for label in class_labels[:-1]:
y_coord = (hit_matrix[:,0]==label).nonzero()[0].max() + 0.5
subplot.plot([0,data.shape[1]-1], [y_coord, y_coord], '-', color='gray', linewidth=0.1)
# set X-axis and Y-axis labels, tick sizes and tick labels
subplot.axis([0, data.shape[1]-1, 0, data.shape[0]-1])
subplot.set_xticks([0,data.shape[1]/2,data.shape[1]-1])
subplot.set_xticklabels(('0','Relative Location','1'), color=text_color, verticalalignment='center', fontsize=axis_tick_fontsize)
for line in subplot.get_xticklines():
line.set_markersize(0)
y_labels = ('Invertebrate','Plant','Vertebrate')
y_label_loc = []
for c in class_labels:
y_label_loc.append(int(np.mean((hit_matrix[:,0]==c).nonzero()[0])))
subplot.set_yticks(y_label_loc)
subplot.set_yticklabels(y_labels, rotation=90, color=text_color, \
horizontalalignment='center', fontsize=axis_tick_fontsize)
for line in subplot.get_yticklines():
line.set_markersize(0)
# set figure title
figure.suptitle('k = %d, m = %d' % (k,m), x=0.95, y=0.95, color=text_color, \
fontsize=title_fontsize, verticalalignment='center', \
horizontalalignment='right')
return figure
if __name__=="__main__":
# set project path and parameters
project_path = '/proj/ar2384/picorna'
virus_family = 'picorna'
data_path = '%s/cache/%s_protein/' %(project_path, virus_family)
talk = False
# values of k, m, cut_off are read from std input
# cut_off = max number of boosting rounds that will be parsed
(k, m, cut_off) = map(int,sys.argv[1:4])
# load virus classes
classes = dict()
c = open('%s/data/%s_classes.csv' % (project_path, virus_family),'r')
for line in c:
row = line.strip().split(',')
virus_name = ' '.join(row[0].split()[1:])
classes[row[0].split()[0]] = [virus_name,int(row[1])]
c.close()
# load kmers
folds = 10
kmer_list = []
for fold in range(folds):
f = open('%s/adt_%s_%d_%d_%d.pkl' % (data_path, model, k, m, fold),'r')
adt = cPickle.load(f)
f.close()
kmer_list.extend([list(set([adt[t][0].name for t in range(cut_off)]))])
# load virus protein sequences
p = open('%s/data/%svirus-proteins.fasta' % (project_path, virus_family),'r')
sequences = []
sequence = 'A'
label = 0
viruses = []
for line in p:
# indicates start of new virus
if ('NC_' in line or 'virus' in line) and '>' not in line:
# add previous protein to list of sequences
sequences.append([sequence,label])
row = line.strip().split(',')
virus_name = ' '.join(row[0].split()[1:])
virus_id = row[0].split()[0]
viruses.append(virus_id)
label = classes[virus_id][1]
sequence = ''
# indicates start of new protein
elif '>' in line:
continue
# continue with previous protein
else:
sequence += line.strip()
p.close()
# pop out first dummy sequence
sequences.pop(0)
# matrix representation of position along a sequence
# where selected k-mers are found, up to m mismatches
    hit_matrix = compile_hit_matrix(sequences, kmer_list, m)
# save compiled data
f = open(data_path + virus_family + '_hitmatrix_collapsed_%d_%d.pkl' % (k,m),'w')
cPickle.Pickler(f,protocol=2).dump(hit_matrix)
cPickle.Pickler(f,protocol=2).dump(viruses)
cPickle.Pickler(f,protocol=2).dump(classes)
f.close()
# group viruses with similar hosts together
sort_indices = hit_matrix[:,0].argsort()
sort_virus_id = [viruses[i] for i in sort_indices]
sort_viruses = [classes[v][0] for v in sort_virus_id]
# plot and save the visualization
figure = plot_hit_matrix(hit_matrix[sort_indices,:], k, m, kmer_list)
filename = '%s/fig/%s_protein_kmer_visualization_collapsed_%d_%d.eps' % (project_path, virus_family, k,m)
    figure.savefig(filename, dpi=500, format='eps')
| mit |
cbmoore/statsmodels | statsmodels/datasets/spector/data.py | 25 | 2000 | """Spector and Mazzeo (1980) - Program Effectiveness Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission of the original author, who
retains all rights. """
TITLE = __doc__
SOURCE = """
http://pages.stern.nyu.edu/~wgreene/Text/econometricanalysis.htm
The raw data was downloaded from Bill Greene's Econometric Analysis web site,
though permission was obtained from the original researcher, Dr. Lee Spector,
Professor of Economics, Ball State University."""
DESCRSHORT = """Experimental data on the effectiveness of the personalized
system of instruction (PSI) program"""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of Observations - 32
Number of Variables - 4
Variable name definitions::
Grade - binary variable indicating whether or not a student's grade
improved. 1 indicates an improvement.
TUCE - Test score on economics test
PSI - participation in program
GPA - Student's grade point average
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the Spector dataset and returns a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=3, dtype=float)
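# Illustrative usage (editor's sketch; assumes statsmodels is installed and
# exposes this dataset as statsmodels.datasets.spector):
#   >>> from statsmodels.datasets import spector
#   >>> data = spector.load()
#   >>> data.endog[:4]   # the binary Grade outcome
#   >>> data.exog[:4]    # the remaining regressors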
def load_pandas():
"""
Load the Spector dataset and returns a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=3, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/spector.csv',"rb"), delimiter=" ",
names=True, dtype=float, usecols=(1,2,3,4))
return data
| bsd-3-clause |
buckbaskin/stopsign | src/visualize/bytes_to_bits.py | 1 | 1570 | #!/usr/bin/env python3
import pandas as pd
from bitstring import BitArray
pkg_path = '/home/buck/ros_ws/src/stopsign'
POSITIVE_FILE = '%s/data/017_the_500/positive_500.csv' % (pkg_path,)
NEGATIVE_FILE = '%s/data/017_the_500/negative_500_%s.csv' % (pkg_path, '%d',)
BIT_FILE = '%s/data/017_the_500/%s' % (pkg_path, '%s_bits_500%s.csv',)
descriptors = []
for i in range(32):
descriptors.append('descr%02d' % (i,))
klass = ['class'.ljust(7)]
all_files = [POSITIVE_FILE]
for i in range(5):
all_files.append(NEGATIVE_FILE % (i,))
for index, START_FILE in enumerate(all_files):
print('read_csv')
print(START_FILE)
df = pd.read_csv(START_FILE, header=0)
print('relabel df %d' % (len(df),))
df['class'] = df['class ']
df = df.drop(columns=['class ',])
df = df.drop(columns=['angle ', 'classid', 'octave ', 'x'.ljust(7), 'y'.ljust(7), 'respons', 'size ', 'imageid'])
bit_label = 'd%02db%01d'
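    # Each byte-valued descriptor column is unpacked into 8 binary columns via
    # (x // 2**bit_index) % 2. Worked example (editor's note): for
    # descr03 == 141 == 0b10001101 the new columns are
    # d03b0=1, d03b1=0, d03b2=1, d03b3=1, d03b4=0, d03b5=0, d03b6=0, d03b7=1.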
for desc_index, descriptor in enumerate(descriptors):
for bit_index in range(0, 8):
new_label = bit_label % (desc_index, bit_index,)
df[new_label] = df[descriptor].apply(lambda x: (x // 2**bit_index) % 2)
df = df.drop(columns=[descriptor])
if desc_index % 8 == 0:
print('done with % 2d / 32' % (desc_index + 1,))
print('write to csv')
# print(df.describe())
if index == 0:
OUT_FILE = BIT_FILE % ('positive', '',)
else:
OUT_FILE = BIT_FILE % ('negative', '_%d' % (index - 1),)
print(OUT_FILE)
df.to_csv(OUT_FILE, index=False)
| mit |
djfan/why_yellow_taxi | Run/dumbo_run.py | 1 | 4709 | import pyproj
import csv
import shapely.geometry as geom
import fiona
import fiona.crs
import shapely
import rtree
import geopandas as gpd
import numpy as np
import operator
import pandas as pd
import pyspark
from pyspark import SparkContext
from shapely.geometry import Point
from pyspark.sql import SQLContext
import datetime
def countLine(partID, records):
import pyproj
import csv
import shapely.geometry as geom
import fiona
import fiona.crs
import shapely
import rtree
import geopandas as gpd
import numpy as np
import operator
import pandas as pd
import pyspark
from pyspark import SparkContext
from pyspark.sql import SQLContext
import datetime
index = rtree.Rtree()
for idx, geometry in enumerate(entr_buf.geometry):
index.insert(idx, geometry.bounds)
entr_lines = {}
proj = pyproj.Proj(init='epsg:2263', preserve_units=True)
if partID==0:
records.next()
reader = csv.reader(records)
for row in reader:
        if (float(row[5]) != 0) and (float(row[9]) != 0):
if row[1]:
wd_h = datetime.datetime.strptime(row[1], '%Y-%m-%d %H:%M:%S')
wd = wd_h.weekday()
hour = wd_h.hour
day = wd_h.day
month = wd_h.month
else:
wd = None
hour = None
day = None
month = None
p = geom.Point(proj(float(row[5]), float(row[6])))
d = geom.Point(proj(float(row[9]), float(row[10])))
p_potential = index.intersection((p.x,p.y,p.x,p.y))
d_potential = index.intersection((d.x,d.y,d.x,d.y))
            p_match = None  # use the first buffer that contains the point (not necessarily the closest one)
d_match = None
for p_idx in p_potential:
if entr_buf.geometry[p_idx].contains(p):
p_match = p_idx # print 'p',p_idx
p_lines = set(entr_buf.lines[p_idx])
break
for d_idx in d_potential:
if entr_buf.geometry[d_idx].contains(d):
d_match = d_idx # print 'd',d_idx
d_lines = set(entr_buf.lines[d_idx])
break
if ((p_match and d_match) and (p_match != d_match)):
dirct_lines = tuple(p_lines.intersection(d_lines))
dirct_lines_wd_h_d_m = (dirct_lines, wd, hour, day, month)
if dirct_lines:
entr_lines[dirct_lines_wd_h_d_m] = entr_lines.get(dirct_lines_wd_h_d_m, 0)+1
return entr_lines.items()
def mapper(record):
for key in record[0][0]:
yield (key, record[0][1], record[0][2], record[0][3], record[0][4]), record[1]
def service(record):
if (record[0][0] == 'B' and (record[0][1] in [5, 6])):
pass
elif (record[0][0] == 'W' and (record[0][1] in [5, 6])):
pass
elif (record[0][0] == 'C' and (record[0][2] in range(0,6))):
pass
elif (record[0][0] == 'B' and (record[0][1] in range(0,6))):
pass
elif (record[0][0] == 'S' and (record[0][1] in range(0,6))):
pass
elif (record[0][0] == 'W' and (record[0][1] in range(0,6))):
pass
else:
return record
def fetch_entr_geo(entr_points):
import geopandas as gpd
import pyproj
routes = ['route_'+str(i) for i in range(1,12)]
entr_geo = gpd.GeoDataFrame(columns=['geometry', 'lines'])
proj = pyproj.Proj(init='epsg:2263', preserve_units=True)
for i in range(len(entr_points)):
entr_coor = entr_points[i].asDict()['geometry'].asDict()['coordinates']
entr_buffer = Point(proj(float(entr_coor[0]), float(entr_coor[1]))).buffer(100)
entr_prop = entr_points[i].asDict()['properties'].asDict()
entr_lines = [entr_prop[r] for r in routes if entr_prop[r]]
entr_geo = entr_geo.append({'geometry':entr_buffer, 'lines':entr_lines}, ignore_index=True)
return entr_geo
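# Editor's sketch (not part of the original job): the spatial-matching pattern
# used in countLine, reduced to two toy entrance buffers and one pickup point.
# The R-tree narrows the candidates by bounding box; a precise shapely
# contains() test then confirms the hit.
def _demo_buffer_match():
    buffers = [Point(0, 0).buffer(100), Point(1000, 1000).buffer(100)]
    index = rtree.Rtree()
    for idx, geometry in enumerate(buffers):
        index.insert(idx, geometry.bounds)
    pickup = Point(30, 40)
    candidates = index.intersection((pickup.x, pickup.y, pickup.x, pickup.y))
    return [i for i in candidates if buffers[i].contains(pickup)]   # -> [0]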
if __name__ == '__main__':
sc = SparkContext(appName="bigdata_project")
entr_json_path = '/user/df1676/2016_(May)_New_York_City_Subway_Station_Entrances.json'
#taxi_csv_path = '/user/df1676/yellow_tripdata_2016-01.csv'
taxi_csv_path = '/user/df1676/df_shuffle.csv'
sqlContext = SQLContext(sc)
global entr_buf
entr_points = sqlContext.read.load(entr_json_path, format='json', header=True, inferSchema=True).collect()[0].asDict()['features']
entr_buf = fetch_entr_geo(entr_points)
#sc = SparkContext(appName="bigdata")
rdd = sc.textFile(taxi_csv_path)
counts = rdd.mapPartitionsWithIndex(countLine).flatMap(mapper).reduceByKey(lambda x,y: x+y).filter(service)
#print counts.collect()[0]
counts.saveAsTextFile('hdfs://babar.es.its.nyu.edu:8020/user/df1676/out')
| mit |
mfjb/scikit-learn | sklearn/ensemble/weight_boosting.py | 97 | 40773 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overriden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _check_sample_weight(self):
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
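# Illustrative sketch (editor's addition, not part of the scikit-learn module):
# the SAMME.R transform applied to one toy probability row with K = 3 classes.
# The result is (K - 1) * (log p - mean(log p)), so each row sums to zero and
# the largest entry corresponds to the most probable class.
def _demo_samme_proba_formula():
    proba = np.array([[0.7, 0.2, 0.1]])
    n_classes = 3
    log_proba = np.log(proba)
    return (n_classes - 1) * (log_proba - (1. / n_classes)
                              * log_proba.sum(axis=1)[:, np.newaxis])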
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
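        # For example, with K = 3 classes and a sample whose true class is the
        # second one, the coded row is [-1/2, 1, -1/2]; every coded row sums to
        # zero by construction.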
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
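            # For example, with learning_rate=1, K=3 classes and a weighted
            # error of 0.25 this gives log(0.75/0.25) + log(2) ~= 1.79; the
            # weight falls to 0 as the error approaches the random-guessing
            # level of (K - 1)/K.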
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           On-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
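        # For example, sample_weight [0.2, 0.5, 0.3] gives cdf [0.2, 0.7, 1.0];
        # a uniform draw of 0.65 falls in (0.2, 0.7] and selects index 1, so
        # samples are drawn with probability proportional to their weights.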
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
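    # The weighted-median search above: e.g. if three estimators predict
    # [1.0, 3.0, 10.0] for a sample with weights [0.2, 0.5, 0.3], the sorted
    # cumulative weights are [0.2, 0.7, 1.0]; the first entry reaching half
    # the total weight (0.5) is the second, so the returned prediction is 3.0.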
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| bsd-3-clause |
nu-xtal-tools/capow | capow.py | 1 | 58268 | ### CAPOW (calculation and plotting of optimal weights) ###
# NOTES: #
# Written using pyqt (4.10.1), matplotlib (2.0.2), numpy (1.12.1), scipy (0.19.0), python (2.7.5)
# requires window_tab.py (gui) and shelx_weighting.py (code to run weighting minimization)
# input: .fcf, SHELX LIST 4 or 8, (with .cif and .ins) or .fco, XD2016,(with .cif and .mas)
##Known issues:
# "RuntimeWarning: PyOS_InputHook is not available for interactive use of PyGTK" - problem is when importing pyplot from matplotlib. Works using matplotlib (2.0.2).
## To report errors/give feedback:
## - [email protected] or [email protected]
import sys, os
## Check that matplotlib, scipy, numpy, pyqt4 are all installed on computer ##
run = True #Flag, will change to false if module versions do not match.
try:
from matplotlib import __version__ as mp_u_vers #getting version of matplotlib on user computer
except ImportError:
print "Program Exiting: Matplotlib module is needed to run this code. Please install and try again."
sys.exit()
try:
	from scipy import __version__ as s_u_vers #getting version of scipy on user computer
except ImportError:
print "Program Exiting: Scipy module is needed to run this code. Please install and try again."
sys.exit()
try:
from numpy import __version__ as n_u_vers #getting version of numpy on user computer
except ImportError:
print "Program Exiting: Numpy module is needed to run this code. Please install and try again."
sys.exit()
try:
from PyQt4.Qt import PYQT_VERSION_STR as pq_u_vers # getting version of pyqt4, method from pyqt website, on user computer
except ImportError:
print "Program Exiting: PyQt module is needed to run this code. Please install and try again."
sys.exit()
py_u_vers = sys.version.split()[0] #getting version of python on user computer, prints long string with version number at start
## Check versions of required modules ##
def bruce(w_vers, u_vers):
	"""function to return whether user version is higher, lower, or same as written version"""
	# compare components numerically where possible so that e.g. "10" counts as higher than "9"
	try:
		w_vers, u_vers = int(w_vers), int(u_vers)
	except ValueError:
		pass # non-numeric component (e.g. a release-candidate tag): fall back to string comparison
	if u_vers > w_vers:
		return "H"
	elif w_vers == u_vers:
		return "S"
	else:
		return "L"
def compare_versions(written_vers, user_vers):
"""function to compare tested vs user versions of python/packages"""
w_vers_list = written_vers.split(".")
u_vers_list = user_vers.split(".")
list_0 = bruce(w_vers_list[0],u_vers_list[0])
if list_0 == "L":
return "L"
elif list_0 == "H":
return "H"
else:
list_1 = bruce(w_vers_list[1],u_vers_list[1])
if list_1 == "L":
return "L"
elif list_1 == "H":
return "H"
else:
list_2 = bruce(w_vers_list[2],u_vers_list[2])
if list_2 == "L":
return "L"
elif list_2 == "H":
return "H"
else:
return list_2, "should not happen"
## version of python/package code, program written with
mp_w_vers = "2.0.2" #matplotlib
s_w_vers = "0.19.0" #scipy
n_w_vers = "1.12.1" #numpy
pq_w_vers = "4.10.1" #pyqt4
py_w_vers = "2.7.5" #python
## Checking if module versions are the same as what the code was written with ##
check_packages = ["", "", "", "", ""] #for use later when checking whether
if mp_u_vers != mp_w_vers :
check_packages[0] = compare_versions(mp_w_vers, mp_u_vers)
if check_packages[0] == "L":
print "Program Exiting: The code was written and tested with matplotlib (%s). Your version of matplotlib is (%s). Using an earlier version of matplotlib will cause the program to break." %(mp_w_vers, mp_u_vers)
run = False
else:
check_packages[0] = "S"
if s_u_vers != s_w_vers:
run = False
check_packages[1] = compare_versions(s_w_vers,s_u_vers)
if check_packages[1] == "L":
print "Program Exiting: The code was written and tested with scipy (%s). Your version of scipy is (%s). Using an earlier version of scipy may cause the program to break. " %(s_w_vers, s_u_vers)
else:
check_packages[1] = "S"
if n_u_vers != n_w_vers:
run = False
check_packages[2] = compare_versions(n_w_vers,n_u_vers)
if check_packages[2] == "L":
print "Program Exiting: The code was written and tested with numpy (%s). Your version of numpy is (%s). Using an earlier version of numpy may cause the program to break." %(n_w_vers, np_u_vers)
else:
check_packages[2] = "S"
if pq_u_vers != pq_w_vers:
run = False
check_packages[3] = compare_versions(pq_w_vers,pq_u_vers)
if check_packages[3] == "L":
print "Program Exiting: The code was written and tested with PyQt4 (%s). Your version of PyQt4 is (%s). Using an earlier version of PyQt4 may cause the program to break." % (pq_w_vers, pq_u_vers)
else:
check_packages[3] = "S"
if py_u_vers != py_w_vers:
run = False
check_packages[4] = compare_versions(py_w_vers,py_u_vers)
if check_packages[4] == "L":
print "Program Exiting: The code was written and tested with python (%s). Your version of python is (%s). Using an earlier version of python may cause the program to break." %(py_w_vers, py_u_vers)
else:
check_packages[4] = "S"
## If modules are not the same, check if user still wants to run code ###
if run == False:
for i in check_packages:
if i == "L":
print "Please update python/packages and try again."
sys.exit()
## code should only be running here if there are "H" in list.
#assumes that code is still compatible with later versions of the packages
print "Warning: CAPOW was written with : \n python: %s \n matplotlib: %s \n scipy: %s \n numpy: %s \n PyQt4: %s" % (py_w_vers, mp_w_vers, s_w_vers, n_w_vers, pq_w_vers)
print "This computer is using : \n python: %s \n matplotlib: %s \n scipy: %s \n numpy: %s \n PyQt4: %s"% (py_u_vers, mp_u_vers, s_u_vers, n_u_vers, pq_u_vers)
print "There may be issues with incompatibilities of functions."
#sys.exit()
else:
pass
print "Running CAPOW"
import math
from copy import deepcopy
from PyQt4.uic import loadUiType
from PyQt4 import QtCore, QtGui
try:
from window_tab import Ui_MainWindow
except ImportError:
	print "Program exiting: window_tab.py file is not present in this folder. Please download a copy and try again."
sys.exit()
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
from matplotlib import cm
import matplotlib.pyplot as plt #this might break with older versions of matplotlib giving a runtime error.
import numpy as np
import scipy.stats as stats
try:
from shelx_weighting import shelx_weighting_calc
except ImportError:
print "Program exiting: shelx_weighting.py is not present in this folder. Please download a copy and try again."
sys.exit()
###
class Main(QtGui.QMainWindow, Ui_MainWindow):
def __init__(self,scale_factor):
super(Main, self).__init__()
self.setupUi(self)
self.qlist_item = QtGui.QListWidgetItem(self.mpl_figs)
self.tab2_info = QtGui.QListWidgetItem(self.tab2_infobox)
self.scale_factor = scale_factor
self.set_up() #sets up gui and matplotlib widget
		self.file_selected = False #Flag in case the user tries to run the weighting scheme code before a file has been selected; if False an error message will show.
def set_up(self):
"""Function to connect all the buttons and set up the table headers for the widget"""
#connecting buttons and functions
self.apply_button.clicked.connect(self.recalc_fig)
self.calc_weight_button.clicked.connect(self.calculate_weighting_scheme)
self.copy_weights.clicked.connect(self.send_weights)
self.drk_button.clicked.connect(self.drk_lim)
self.clear_button.clicked.connect(self.clr_lim)
self.clearweights.clicked.connect(self.clear_weights)
self.actionSelect_File.triggered.connect(self.select_file)
# adding equality signs into filter tables
self.i_filt_eq.insertItem(0,">")
self.i_filt_eq.insertItem(1,"<")
self.i_sig_filt_eq.insertItem(0,">")
self.i_sig_filt_eq.insertItem(1,"<")
self.i_filt_eq_2.insertItem(0,">")
self.i_filt_eq_2.insertItem(1,"<")
self.i_sig_filt_eq_2.insertItem(0,">")
self.i_sig_filt_eq_2.insertItem(1,"<")
self.resoln_upper_eq.insertItem(0,"<") #swapped this round to make it read nicer. Resoln < 1 (so will keep all values of resolution that are less than 1.)
self.resoln_upper_eq_2.insertItem(0,"<") #swapped
self.resoln_lower_eq_2.insertItem(0,">") #swapped
self.resoln_lower_eq.insertItem(0,">") #swapped
self.filt_eq = ">"
self.sig_filt_eq = ">"
#naming header for weighting scheme tab table
headers = ["a","b","goof","wr2","stop a","stop b","I cutoff","Isig cutoff","resoln <","resoln >","no of reflns","start a","start b","bin","bin variance"]
self.tablemodel = QtGui.QStandardItemModel() #internet says to make it a model to work
self.tablemodel.setHorizontalHeaderLabels(headers)
self.weighting_tableview.setModel(self.tablemodel)
#sets up matplotlib widget
self.fig1 = plt.figure()
self.ax1f1 = self.fig1.add_subplot(1,1,1)
self.ax1f1.plot([-4,4],[-4,4],'--',color='r') #plotting expected plot line
self.canvas = FigureCanvas(self.fig1)
self.mplvl.addWidget(self.canvas)
self.canvas.draw()
self.toolbar = NavigationToolbar(self.canvas,self.mplwindow, coordinates=True)
self.mplvl.addWidget(self.toolbar)
def select_file(self):
"""Allows user to select a file to use and then creates normal probability plot from the data"""
dlg = QtGui.QFileDialog()
marker = os.getcwd()
plt.cla() #clear matplotlib
file_name = QtGui.QFileDialog.getOpenFileName(self, 'Select structure factor file', marker,"fcf or fco files (*.fcf *.fco)")
#file_name = QtGui.QFileDialog.getOpenFileName(self, 'Select structure factor file', '/home/',"fcf or fco files (*.fcf *.fco)") #change '/home/' to change the folder the code opens first.
file_name = str(file_name)
self.code_run = True #flag to stop weighting scheme code from running. Used later
if os.path.isfile(file_name) == False: #if user did not select a file
self.qlist_item.setText("Error: File has not been selected")
else:
self.i_file = file_name
#self.addmpl()
self.calc_weight_button.setEnabled(True)
			self.file_selected = True #Flag in case the user tries to run the weighting scheme code before a file has been selected
### Reset items from a potential previous file
self.lamda = 0.0
self.shelx_a = 0.0
self.shelx_b = 0.0
self.mas_a = 0.0
self.mas_b = 0.0
self.mas_c = 0.0
self.mas_d = 0.0
self.mas_e = 0.0
self.mas_f = 0.3333333333
self.n_independant_params = ""
### reset fields in normal probability plot tab ####
i_filt =self.i_filt.setText("")
i_sig_filt = self.i_sig_filt.setText("")
resoln_upper_filt = self.resoln_upper_filt.setText("")
resoln_lower_filt = self.resoln_lower_filt.setText("")
self.clr_lim()
self.tab2_info.setText("")
self.qlist_item.setText("")
### reset fields in weighting calculator tab ###
self.clear_weights()
self.resoln_upper_filt_2.setText("")
self.resoln_lower_filt_2.setText("")
self.i_filt_2.setText("")
self.i_sig_filt_2.setText("")
self.i_filt_2.setText("")
self.i_sig_filt_2.setText("")
self.a_stop.setText("")
self.b_stop.setText("")
self.a_start.setText("") #need to check for float
self.b_start.setText("") #need to check for float
self.i_filt_eq.setCurrentIndex(0)
self.i_sig_filt_eq.setCurrentIndex(0)
#need shelx to be checked, and weighting binning, I to be checked.
### need cif file to get number of independant parameters
file_loc_list = os.path.split(file_name) #separating file name from folders: using os for file functions so is useable on other operating systems
self.calculate_start_check.setChecked(True)
self.bintype_intensity.setChecked(True)
self.bintype_resolution.setChecked(False)
self.i_filt_eq_2.setCurrentIndex(0)
self.i_sig_filt_eq_2.setCurrentIndex(0)
marker = file_loc_list[0] #marker to be used when opening file dialog to select file, opens in folder that fco/fcf file was selected from
### code will look for and select a cif file with the same starting name as input file, if no file, will open a file dialog to select one ###
no_cif = False
if file_name[-3:] == "fco":
try:
potential_lsm_file_lst = os.path.split(file_name)
new_file_name = potential_lsm_file_lst[1][0:-4] + "_lsm.cif"#xd_lsm.cif made during xdlsm refinement
potential_lsm_file = os.path.join(potential_lsm_file_lst[0], new_file_name)
open(potential_lsm_file,"r")
cif_file = potential_lsm_file
except IOError:
cif_file = QtGui.QFileDialog.getOpenFileName(self, 'Select cif file', marker, "cif file (*.cif)")
if os.path.isfile(str(cif_file)) == False: #if user did not select a file
self.qlist_item.setText("Error: Cif file has not been selected")
no_cif = True
else:
try:
potential_cif_file_lst = os.path.split(file_name)
new_cif_file = potential_cif_file_lst[1][0:-3]+ "cif"
potential_cif_file = os.path.join(potential_cif_file_lst[0], new_cif_file)
open(potential_cif_file,"r")
cif_file = potential_cif_file
except IOError:
cif_file = QtGui.QFileDialog.getOpenFileName(self, 'Select cif file', marker, "cif file (*.cif)")
if os.path.isfile(str(cif_file)) == False: #if user did not select a file
self.qlist_item.setText("Error: Cif file has not been selected")
no_cif = True
if no_cif == True:
pass
else:
for line in open(cif_file,"r").readlines():
if line.startswith("_refine_ls_number_parameters"):
n_params = line.split()[1]
self.n_independant_params = float(n_params)
if self.n_independant_params == "":
self.cif_info = False #error capture for later
self.qlist_item.setText("Number of independant parameters is not present in cif file. Program has stopped.")
else:
self.cif_info = True
self.addmpl()
def addmpl(self):
"""gets values to create normal probability plot"""
run_flag = self.get_graph_values(self.i_file) #STOP/GO depending on if graph values have been extracted successfully
if run_flag == "STOP": #if run flag is STOP then values have not been extracted successfully from code.
self.code_run = False #This flag stops weighting scheme code from being able to run if there was a problem with importing files
elif run_flag == "NoP": #no parameter from cif
self.code_run = False
elif run_flag == "NoI": #no info from ins/mas
self.code_run = False
elif run_flag == "NoV": #no values found in fcf/fco
self.code_run = False
else:
self.R2_text = ""
self.clear_weights()
y = [-4,0,4] #x and y for expected normal probability line
x = [-4,0,4]
def onpick2(event):
""" function to return information about reflections clicked on in normal probability plot"""
ind = event.ind
text_box_text = ""
text_box_list = ["Reflections Selected:","label:resoln:(fm,fc,sig(F)):,(graph x,y)"]
if len(ind) < 25:
for i in ind:
text_box_list.append("%s : %f : (%f, %f, %f) : (%f, %f)"%(self.labels[i],np.take(self.no_col,i) ,np.take(self.fm_only,i),np.take(self.fc_only,i),np.take(self.sigf_only,i),np.take(self.res[0][0], i), np.take(self.res[0][1], i)))
if len(text_box_list) > 0:
text_box_text = "\n".join(text_box_list)#
if len(self.R2_text) != 0:
text_box_text = self.R2_text + "\n" + text_box_text
self.qlist_item.setText(text_box_text)
f = 1/3.0 #default f value
if self.i_file[-3:] == "fcf":
if self.weight_applied == True:
self.calc_graph(-99999,-99999,999999,-999999,self.shelx_a,self.shelx_b,0.0,0.0,0.0,f) #shelx_a and shelx_b taken from ins file
else:
self.calc_graph(-99999,-99999,999999,-999999,-2.0,0.0,0.0,0.0,0.0,f) #-2.0 is xd default, a < -1.0 gives statistical weights
else:
self.calc_graph(-99999,-99999,999999,-999999,self.mas_a,self.mas_b,self.mas_c,self.mas_d,self.mas_e,self.mas_f)
x_min= False
x_max= False
y_min= False
y_max = False
plt.cla()
self.plot_norm(x_min,x_max,y_min,y_max)
self.ax1f1.plot(x,y,'--',color='r')
self.canvas.draw()
self.canvas.mpl_connect('pick_event', onpick2)
def select_info_file(self,marker,file_type):
"""function to bring up file dialog to select info - ins/mas file to provide extra information about refinement - lambda, applied weight"""
dlg = QtGui.QFileDialog()
if file_type == "fcf":
file_name = QtGui.QFileDialog.getOpenFileName(self, 'Select .ins or .res file', marker,"ins file (*.ins *.res)")
elif file_type == "fco":
file_name = QtGui.QFileDialog.getOpenFileName(self, 'Select .mas file', marker,"mas file (*.mas )")
#self.info_file.setText(file_name)
#self.info_file_name
if os.path.isfile(str(file_name)) == False: #if user did not select a file
self.qlist_item.setText("Error: No ins/mas file has not been selected")
file_name = False
return file_name
def check_info_file(self,i_file, file_type):
""" checks that info file exists"""
info_file_lst = os.path.split(i_file)
if file_type == "fcf":
new_file_name = info_file_lst[1][0:-3] + "ins"
info_file = os.path.join(info_file_lst[0], new_file_name)
file_type="fcf"
elif file_type == "fco":
new_file_name = info_file_lst[1][0:-3] + "mas"
info_file = os.path.join(info_file_lst[0], new_file_name)
file_type ="fco"
try:
open_ins = open(info_file,"r")# ##potential issue if ins file does not have same name as fcf file
except IOError:
marker = os.path.split(i_file)[0]
info_file = self.select_info_file(marker, file_type)
return info_file
def get_graph_values(self,i_file):
"""function to obtain values of fc, fm and su from imported file. Calculates resolution from lambda for fcf file."""
f= open(i_file,"r") #open selected file
g=f.readlines()
s = 0
start_defined = False
self.F_c = []
self.F_m = []
self.sig_F = []
self.sth = []
self.resoln = []
self.F_v_F = []
file_type = i_file[-3:] #xd = fco, shelx = fcf
self.labels_all = []
self.sinth_all = []
if file_type == "fco":
for line in g:
s += 1
if line.startswith(" _refln_XD_refine_code"):
start = s
val_order = [4,3,5] # order of line split for [fm, fc, fsig], to be used when getting values from file
start_defined = True
elif line.startswith("data_"):
self.code_mark = line.split("_")[1] #gets data flag name from xd.mas
info_file_name = self.check_info_file(i_file, "fco")
if info_file_name == False:
no_info_file = True #No ins/mas file has been selected
else:
no_info_file = False
open_fco_name = open(info_file_name,"r")
open_fco = open_fco_name.readlines()
for line in open_fco:
if line.startswith("WEIGHT") or line.startswith("!WEIGHT"):
weight_line = line.split()
self.mas_a = float(weight_line[1])
self.mas_b =float(weight_line[2])
self.mas_c =float(weight_line[3])
self.mas_d =float(weight_line[4])
self.mas_e =float(weight_line[5])
self.mas_f = float(weight_line[6])
self.weight_applied = True
self.a_edit.setText(str(self.mas_a))
self.b_edit.setText(str(self.mas_b))
self.c_edit.setText(str(self.mas_c))
self.d_edit.setText(str(self.mas_d))
self.e_edit.setText(str(self.mas_e))
self.f_edit.setText(str(self.mas_f))
elif line.startswith("WAVE"):
wave_line = line.split()
self.lamda = float(wave_line[1])
open_fco_name.close()
elif file_type == "fcf":
start_string = "NOTDEFINED"
self.code_mark = "[Plot Name]"
#### get weights and unit cell parameters from filename.ins ####
info_file_name = self.check_info_file(i_file, "fcf")
if info_file_name == False:
no_info_file = True
else:
no_info_file = False
open_ins_name = open(info_file_name,"r")
open_ins = open_ins_name.readlines()
for line in open_ins:
if line.startswith("CELL"):
cell_params = line.split()
self.lamda = float(cell_params[1])
unit_a =float(cell_params[2])
unit_b =float(cell_params[3])
unit_c =float(cell_params[4])
#numpy uses angles in radians for trig, so need to convert (as we will use these values later)
unit_alpha =np.radians(float(cell_params[5]))
unit_beta =np.radians(float(cell_params[6]))
unit_gamma= np.radians(float(cell_params[7]))
elif line.startswith("WGHT"):
#takes weight values from ins file to use in graph.
shelx_weight = line.split()
self.shelx_a = float(shelx_weight[1])
self.shelx_b = float(shelx_weight[2])
self.weight_applied = True
self.a_edit.setText(str(self.shelx_a))
self.b_edit.setText(str(self.shelx_b))
self.c_edit.setText("0.0")
self.d_edit.setText("0.0")
self.e_edit.setText("0.0")
self.f_edit.setText("1/3")
#shelx currently only calculates a and b.
#send these values to initial graph
open_ins_name.close()
######
for line in g:
s += 1
if line.startswith("_shelx_refln_list_code"):
list_code = line.split()[1]
if list_code == "4":
#start_defined = True
start_string = " _refln_observed_status"
val_order = [4, 3, 5] #[Fm2, Fc2,Fsig2]
elif list_code == "8":
start_string = " _shelx_refinement_sigma"
#start_defined = True
val_order = [3, 5, 4] #[Fm2, Fc2 ,Fsig2]
else:
start_defined = False
self.qlist_item.setText("Code can only work with LIST 4 and 8")
break
elif line.startswith(start_string) or line.startswith(" %s" %start_string): #sometimes there is an extra space before the start string in fcf
start = s
start_defined = True
else:
start_defined = False
self.qlist_item.setText("Program only works with .fco or .fcf (LIST 4,8).")
if self.cif_info == False:
self.code_running = False
return "NoP" #meaning we do not have the number of independant parameters from the cif
elif no_info_file == True:
self.code_running = False
return "NoI"
elif start_defined == False: #if start of list has not been defined, code will break
self.qlist_item.setText("Error: Values not found in file.")
self.code_running = False
self.recalc = False #so that graph cannot be recalculated because there is no file
return "STOP" #have been unable to get values from input file therefore will not make graph and print error message
else:
for i in range(start,len(g)):
l = g[i]
lst = l.split()
if len(lst) == 0: continue
self.F_m.append(float(lst[val_order[0]]))
self.F_c.append(float(lst[val_order[1]]))
self.sig_F.append(float(lst[val_order[2]]))
#get key of hkl for labels #
ha = int(lst[0])
la =int(lst[2])
ku = int(lst[1])
h = str(ha).rjust(4, " ")
k = str(ku).rjust(4, " ")
lz = str(la).rjust(4, " ")
key_lst = [h,k,lz]
key = "".join(key_lst)
key = key.lstrip()
self.labels_all.append(key)
if file_type == "fcf":
#formula for triclinic from giacavazzo pg 66
denom = (1- (np.cos(unit_alpha))**2 - (np.cos(unit_beta))**2 - (np.cos(unit_gamma))**2 + 2*np.cos(unit_alpha)*np.cos(unit_beta)*np.cos(unit_gamma))
term1 =(ha**2/unit_a**2)*(np.sin(unit_alpha)**2)
term2 =(ku**2/unit_b**2)*(np.sin(unit_beta)**2)
term3 = (la**2/unit_c**2)*(np.sin(unit_gamma)**2)
term4 =((2*ku*la)/(unit_b*unit_c))*(np.cos(unit_beta)*np.cos(unit_gamma) - np.cos(unit_alpha))
term5 =((2*ha*la)/(unit_a*unit_c))*(np.cos(unit_alpha)*np.cos(unit_gamma) - np.cos(unit_beta))
term6 = ((2*ku*ha)/(unit_b*unit_a))*(np.cos(unit_beta)*np.cos(unit_alpha) - np.cos(unit_gamma))
num = term1 + term2 +term3 +term4+ term5 +term6
one_dhkl = np.sqrt(num/denom) #as value calculated is 1/dhkl**2
resoln = self.lamda*one_dhkl/2
self.sth.append(resoln)
sinthl = one_dhkl/2
self.resoln.append(sinthl)
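					#sanity check on the formula above: for an orthogonal cell (alpha = beta = gamma = 90 degrees)
					#the cross terms vanish and 1/d^2 reduces to h^2/a^2 + k^2/b^2 + l^2/c^2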
elif file_type == "fco":
					self.sth.append(float(lst[6])*self.lamda) #the fco stores sin(theta)/lambda, so multiply by lambda to recover sin(theta)
self.resoln.append(float(lst[6]))
else:
self.sth.append(1)
if len(self.F_m) == 0:
self.qlist_item.setText("No values found in file.")
return "NoV"
else:
self.code_running = True
self.recalc = True # preliminarily allowing graph to be recalculated as values have been collected
return "GO" #values have been collected, therefore return go to show that code should continue
def calc_graph(self,i_filt,i_sig_filt,resoln_upper_filt,resoln_lower_filt,a,b,c,d,e,f):
"""function to produce normal probability plot"""
if self.code_running == False: #Check that values from fco/fcf have been gathered.
pass
else:
### lists for values to be plotted
F_val = []
f_sub = []
f_m_only = []
sig_f_only = []
f_c_only = []
fc = [] #colour of points
#set weight string for printing
if a <= -1:
self.weight_string = "Weighting applied:\n Statistical weights \n"
else:
self.weight_string = "Weighting applied:\n a: %f, b:%f, c%f, d:%f, e:%f, f: %f\n" %(round(a,6), round(b,6), round(c,6), round(d,6), round(e,6),round(f,6))
i_sig = []
labels = []
w = []
def calc_and_append(i,a,b,c,d,e,f):
"""function to calculate weighting value for each reflection and append values to lists for further use"""
s = self.sig_F[i]
if c > 0:
q = np.exp(c*self.sth[i])
elif c< 0:
q = 1 - np.exp(c*self.sth[i])
else: #c == 0
q = 1.0
if a < -1.0: #statistical weights
a = 0.0
b = 0.0
c = 0.0
d = 0.0
e = 0.0
f = 1/3.0
#p = (f*self.F_m[i] + (1-f)*self.F_c[i])
base = (s**2)
w2 = base/q
elif a > -1.0:
p = (f*self.F_m[i] + (1-f)*self.F_c[i])
base = (s**2 + (a*p)**2 + b*p + d + (self.sth[i]/self.lamda)*e)
w2 = base/q
				else: # a == -1.0 exactly: unit weights
#p = 0.0
w2 = 1.0
#base = (s**2 + (a*p)**2 + b*p + d + (self.sth[i]/self.lamda)*e)
#w2 = base/q
#w_tot = sum(w*np.square(fo-scale_factor*fc))
w.append(np.sqrt(w2))
				f_no = ((self.F_c[i]-self.F_m[i]))/np.sqrt(w2) #weighted residual: (Fc^2 - Fo^2) scaled by the effective standard uncertainty
i_sig.append(self.F_m[i]/self.sig_F[i])
f_m_only.append(self.F_m[i])
sig_f_only.append(self.sig_F[i])
fc.append(self.resoln[i]) #change this based on what you want colour to be!! - probably want this to be an option (resoln is sintheta/lamda)
				F_val.append(f_no) #F_val collects the weighted residuals that are ranked for the probability plot
f_c_only.append(self.F_c[i])
labels.append(self.labels_all[i])
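			#calc_and_append stores w2 = [sig(Fo2)^2 + (a*P)^2 + b*P + d + e*sin(theta)/lambda]/q
			#with P = f*Fo2 + (1-f)*Fc2 (a SHELX-style weight denominator), so the plotted
			#residual f_no is (Fc2 - Fo2)/sqrt(w2), i.e. the error-weighted difference.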
less_than = 0
for i in range(0,len(self.F_m)):
			#the cutoff below is set so low that effectively no reflections are excluded here;
			if self.F_m[i] > -9999999999.0: #changing this cutoff to 0.0 would exclude negative intensities and match DRKplot
resoln_i = float(self.resoln[i]) #resolution being sintheta/lamda
### filtering reflections based on limits ###
utrue = resoln_i >= float(resoln_lower_filt) #is the resolution number above the lower filt
ltrue = float(resoln_upper_filt) >= resoln_i # is resolution number below the upper filt
# lower filt < resolution < upper filt == good! :D
if utrue == True:
if ltrue == True:
if self.filt_eq == ">":
if self.F_m[i] >= i_filt:
if self.sig_filt_eq == ">":
if self.F_m[i]/self.sig_F[i] >= i_sig_filt:
calc_and_append(i,a,b,c,d,e,f) # append
elif self.sig_filt_eq == "<":
if self.F_m[i]/self.sig_F[i] <= i_sig_filt:
calc_and_append(i,a,b,c,d,e,f) #when this condition is hit, then removed, bad things happen (oh yes because it is reset
elif self.filt_eq == "<":
if self.F_m[i] <= i_filt:
if self.sig_filt_eq == ">":
if self.F_m[i]/self.sig_F[i] >= i_sig_filt:
calc_and_append(i,a,b,c,d,e,f) # append
elif self.sig_filt_eq == "<":
if self.F_m[i]/self.sig_F[i] <= i_sig_filt:
calc_and_append(i,a,b,c,d,e,f) #append
else:
less_than += 1
if len(F_val) == 0: #if this occurs there are no values in the list, therefore no graph will be drawn
self.nocalc_text = "No values match this criteria"
text_box_text = self.nocalc_text
self.qlist_item.setText(text_box_text)
self.recalc = False
else:
self.recalc = True
self.res = stats.probplot(F_val)
zipped = zip(F_val,fc,i_sig,labels,w,f_m_only,sig_f_only,f_c_only) #sort in order
sort = sorted(zipped) #sorts by first column.
self.no_col = []
self.labels = []
self.weights = []
self.fm_only = []
self.sigf_only = []
self.fc_only = []
for item in sort:
self.no_col.append(item[1])
self.labels.append(item[3])
self.weights.append(item[4])
self.fm_only.append(item[5])
self.sigf_only.append(item[6])
self.fc_only.append(item[7])
def calculate_R2(F_val):
"""function to calculate R^2 value of normal probability plot"""
res_m = self.res[1][0]
res_c = self.res[1][1]
y_avg = np.mean(F_val)
tot_sq = 0
reg_sq = 0
norm_sq_tot = 0
se_sq = 0
goof_tot = 0
for i in range(0,len(self.res[0][0])):
x = self.res[0][0][i]
y = self.res[0][1][i]
ypred = res_m*x+res_c
ssr = (ypred - y_avg)**2
sst = (y - y_avg)**2
se = (y - ypred)**2
norm_sq = (y-x)**2
tot_sq += sst
reg_sq += ssr
se_sq += se
norm_sq_tot += norm_sq
if x == 0:
pass
else:
goof_add = ((y-x)**2)/x #calculating goodness of fit
goof_tot += goof_add
R2_straight = 1- (norm_sq_tot/tot_sq)
return 1 - se_sq/tot_sq, R2_straight#R2
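				#calculate_R2 returns two statistics: R2 of the least-squares line fitted by
				#probplot (1 - SS_res/SS_tot) and "R2 straight", the same quantity measured
				#against the ideal y = x line instead of the fitted line.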
R2_val, r2_s = calculate_R2(F_val)
self.R2_text = "%sR2 : %f \nR2 straight: %f \nNo. of Reflections: %d \n"%(self.weight_string, R2_val, r2_s, len(F_val))
text_box_text = self.R2_text
self.qlist_item.setText(text_box_text)
def recalc_fig(self):
"""function triggered by apply button on normal probability plot tab, recalculates plot with defined weight and/or data limits and axes"""
self.run_recalc = True #set as true, used for while loop to check that a -f and limits are all numerical or fractions
while self.run_recalc == True: #using while loop to allow stopping of function if values are non-numeric, or file is missing.
if self.file_selected == False:
self.qlist_item.setText("Error: File has not been selected")
break
if self.cif_info == False:
self.qlist_item.setText("Error: number of independant parameters is not in cif file")
break
a_val = self.a_edit.text()
if len(a_val) < 1: #if there is nothing in box, len = 0, set default (0.0)
a_val = 0.0
else:
a_val = self.check_int(a_val)
if a_val == "False":
break
b_val =self.b_edit.text()
if len(b_val) < 1:
b_val = 0.0
else:
b_val = self.check_int(b_val)
if b_val == "False":
break
c_val =self.c_edit.text()
if len(c_val) < 1:
c_val = 0.0
else:
c_val = self.check_int(c_val)
if c_val == "False":
break
d_val =self.d_edit.text()
if len(d_val) < 1:
d_val = 0.0
else:
d_val = self.check_int(d_val)
if d_val == "False":
break
e_val =self.e_edit.text()
if len(e_val) < 1:
e_val = 0.0
else:
e_val = self.check_int(e_val)
if e_val == "False":
break
f_val = self.f_edit.text()
if len(f_val) < 1:
f_val = 1/3.0 #default === 1/3.0
else:
f_val = self.check_int(f_val)
if f_val == "False":
break
self.filt_eq = self.i_filt_eq.currentText() #checking which inequality sign is selected for intensity filter
self.sig_filt_eq = self.i_sig_filt_eq.currentText()#checking which inequality sign is selected intensity/s.u. filter
i_filt =self.i_filt.text()
i_sig_filt = self.i_sig_filt.text()
resoln_upper_filt = self.resoln_upper_filt.text()
resoln_lower_filt = self.resoln_lower_filt.text()
if len(self.i_sig_filt.text()) > 0:
i_sig_filt= self.check_int(i_sig_filt)
				if i_sig_filt == "False":
break
else:
if self.sig_filt_eq == ">":
i_sig_filt = -9999999999.9
else:
i_sig_filt = 9999999999.9
if len(self.i_filt.text()) > 0:
i_filt = self.check_int(i_filt)
				if i_filt == "False":
break
else:
if self.filt_eq == ">":
i_filt = -9999999999.9
else:
i_filt = 9999999999.9
### checking resoln_upper_filt and lower _filt ####
if resoln_upper_filt == "":
resoln_upper_filt = 9999999999999.0
else:
resoln_lower_filt =self.check_int(resoln_lower_filt)
if resoln_lower_filt == False:
break
if resoln_lower_filt == "":
resoln_lower_filt = -99999999999999.0
else:
resoln_upper_filt = self.check_int(resoln_upper_filt)
if resoln_upper_filt == False:
break
#check they are all numerical values
plt.cla()
self.calc_graph(i_filt,i_sig_filt,resoln_upper_filt,resoln_lower_filt,a_val, b_val, c_val, d_val, e_val,f_val)
if self.recalc == True: #checks there are values in list still after things have been filtered out True/False, assigned in calc_graph based on whether there are items in value lists after filters applied
def check(a):
"""checks if graphical limits are floats or not"""
if len(a) < 1:
return False
else:
try:
a = float(a)
return a
except ValueError:
return False
x_min_val = check(self.x_min.text())
x_max_val = check(self.x_max.text())
y_min_val = check(self.y_min.text())
y_max_val = check(self.y_max.text())
self.plot_norm(x_min_val, x_max_val, y_min_val, y_max_val)
self.run_recalc = False #to stop while loop from running
def plot_norm(self,x_min,x_max,y_min,y_max):
"""plots normal probability plot, if limits not defined, max or min value used as limit to show all points"""
### need to split up resolutions for graph ###
res_resolncut = []
res2_resolncut = []
upper = 2.0
lower = 0.0
colour = []
res2 = self.ax1f1.scatter(self.res[0][0],self.res[0][1],c=self.no_col, lw=0, s=10,picker=True)#line_picker)
y = [-4,0,4]
x = [-4,0,4]
self.ax1f1.plot(x,y,'--',color='r')
self.ax1f1.axhline(y=0,ls="-",c="black")
self.ax1f1.axvline(x=0,ls="-",c="black")
if x_min != False:
if x_max != False:
self.ax1f1.set_xlim([x_min,x_max])
else:
self.ax1f1.set_xlim([x_min,plt.xlim()[1]])
if x_max != False:
if x_min != False:
pass
else:
self.ax1f1.set_xlim([plt.xlim()[0] ,x_max])
if y_min != False:
if y_max != False:
self.ax1f1.set_ylim([y_min,y_max])
else:
self.ax1f1.set_ylim([y_min,plt.ylim()[1]])
if y_max != False:
if y_min != False:
pass
else:
self.ax1f1.set_ylim([plt.ylim()[0],y_max])
self.ax1f1.set_xlabel("Expected Residuals")
self.ax1f1.set_ylabel("Ordered Residuals")
self.fig1.canvas.draw()
def check_frac(self,y):
"""function to check if input value for weight/limit is a fraction and if so extract value"""
frac = y.split("/")
def is_number(x):
try:
no = float(x)
return True
except ValueError:
return False
if len(frac) == 2:
if is_number(frac[0]) == True and is_number(frac[1]) == True: #makes sure both items in list can be floats
new_no = float(frac[0])/float(frac[1])
fraction = new_no
else:
fraction = "False"
else:
fraction = "False"
return fraction
def check_int(self,string):
"""function to check if input value for weight/limit in normal probability plot is a float and if so extract value"""
if string == "":
return string
else:
try:
no = float(string)
except ValueError:
if "/" in string:
no = self.check_frac(string) #so fractions can also be input
else:
no = "False" #using it as a string as if value = 0.0, will be evaluated as false when it shouldn't be.
if no == "False":
text_box_text = "One of the input values is not a number or a fraction:\n graph not recalculated"
self.qlist_item.setText(text_box_text)
return no
def check_int_weight(self,string):
"""function to check if input for cutoffs in weighting tab is a number"""
if string == "":
return string
else:
try:
no = float(string)
except ValueError:
if "/" in string:
no = self.check_frac(string)
else:
no = "False"
if no == "False":
text_box_text = "One of the input values is not a number or a fraction:\n graph not recalculated"
self.tab2_info.setText(text_box_text)
return no
def calculate_weighting_scheme(self):
"""calculates optimal a and b parameters using weighting scheme code from python script"""
#f_c, f_m, sig_f
#check input values are numerical
all_num=True
resoln_upper = self.check_int_weight(self.resoln_upper_filt_2.text())
resoln_lower =self.check_int_weight(self.resoln_lower_filt_2.text())
i_remove = self.check_int_weight(self.i_filt_2.text())
isig_remove = self.check_int_weight(self.i_sig_filt_2.text())
i_val = self.check_int_weight(self.i_filt_2.text())
isig_val = self.check_int_weight(self.i_sig_filt_2.text())
all_num = True
if resoln_upper == "False" or resoln_lower == "False" or i_remove == "False" or isig_remove == "False" or i_val == "False" or isig_val == "False":
all_num = False
#check that input values are numbers.
if self.file_selected == False:
self.tab2_info.setText("Error: File has not been selected")
elif all_num == False:
self.tab2_info.setText("Error: One or more of cutoffs is not a number.")
#pass
elif self.code_run == False: #Stop running if there was an issue importing files
self.tab2_info.setText("Error: Problem importing files. Weighting code cannot run.")
else:
self.tab2_info.setText("")
Fmlist = []
Fclist = []
sigflist = []
resolnlist = []
self.calc_weight_button.setEnabled(False)
table_column = []
Fmlist = deepcopy(self.F_m)
Fclist =deepcopy(self.F_c)
sigflist =deepcopy(self.sig_F)
resolnlist = deepcopy(self.resoln)
F_c = np.array(Fclist)
F_m = np.array(Fmlist)
sig_F = np.array(sigflist)
resolution = np.array(resolnlist)
stop_run = False
bintype = self.weight_bin_style()
new_Fm = []
new_Fc = []
new_sigF = []
newres = []
isig_table = ""
i_table = ""
ru_table = ""
rl_table = ""
noreflns = len(new_Fm)
#run = shelx_weighting_calc olex_weighting_scheme(self.n_independant_params, self.scale_factor, new_Fm, new_Fc, new_sigF, newres, bintype) #setting up class from weighting scheme python scripts.
new_Fm, new_Fc, new_sigF, newres, i_table,isig_table, ru_table, rl_table,noreflns = self.sort_data(F_m, F_c, sig_F, resolution)
zero_indicies = new_Fm < 0 #need reset to zero as in olex
new_Fm[zero_indicies] = 0
run = shelx_weighting_calc(self.n_independant_params, self.scale_factor, new_Fm, new_Fc, new_sigF, newres, bintype) #setting up class from weighting scheme python scripts.
if self.calculate_start_check.checkState() == 2: #check state of checkbox.
calc_start = True
start_a = 0
start_b = 0
start_a, start_b = run.calculate_start(F_m, F_c, sig_F) #these vals all data, sorting would start here before put into file
a_stop = self.check_int_weight(self.a_stop.text())
b_stop = self.check_int_weight(self.b_stop.text())
if a_stop == "False" or b_stop == "False":
self.tab2_info.setText("Error: One or more Weighting Stopping Points not a number.")
stop_run = True
else:
calc_start = False
start_a = self.check_int_weight(self.a_start.text()) #need to check for float
start_b = self.check_int_weight(self.b_start.text()) #need to check for float
a_stop = self.check_int_weight(self.a_stop.text())
b_stop = self.check_int_weight(self.b_stop.text())
if start_a == "" or start_b == "":
self.tab2_info.setText("Error: No starting a or b values set. Please input values or check Calculate Start box and try again")
stop_run = True
elif start_a == "False" or start_b == "False":
self.tab2_info.setText("Error: One or more Weighting Starting Points not a number.")
stop_run = True
elif a_stop == "False" or b_stop == "False":
self.tab2_info.setText("Error: One or more Weighting Stopping Points not a number.")
stop_run = True
if stop_run == False:
if a_stop == "" or b_stop == "":
a_stop = 1e-4
b_stop = 5e-3
# else:
# a_stop = float(a_stop)
# b_stop = float(b_stop)
#olex defaults
a,b,goof,wr2,variance, error = run.minimize_variance(a_stop,b_stop,start_a, start_b)
if len(error) > 0:
self.tab2_info.setText(error)
table_column = []
table_vals = [a,b,goof,wr2,a_stop,b_stop,i_table,isig_table, ru_table, rl_table,noreflns,start_a,start_b,bintype,variance]
for i in table_vals:
item = QtGui.QStandardItem(str(i))
table_column.append(item)
self.tablemodel.appendRow(table_column)
self.weighting_tableview.setModel(self.tablemodel)
if a == "-" and b == "-":
self.tab2_info.setText("CAPOW could not search for weighting scheme, try smaller stopping values or adjusting starting values.")
self.calc_weight_button.setEnabled(True)
def weight_bin_style(self):
"""function to check which weighting bin is required"""
if self.bintype_intensity.checkState() == 2:
bintype = "I"
#elif self.bintype_resolution.checkState() == 2:
else: #either intensity is ticked or resolution is ticked.
bintype = "R"
return bintype
def sort_data(self, Fm, Fc, sigF, resolution):
"""function to apply cutoffs to data for weighting scheme calculator"""
#create lists to populate during application of cutoffs
remove_index = []
remove_resup = []
remove_reslow = []
remove_i = []
remove_isig = []
resulting_list = []
ints = []
# obtain input values from weighting tab
resoln_upper =self.resoln_upper_filt_2.text()
resoln_lower =self.resoln_lower_filt_2.text()
i_remove = self.i_filt_2.text()
isig_remove = self.i_sig_filt_2.text()
i_filt_eq = self.i_filt_eq_2.currentText()
isig_filt_eq = self.i_sig_filt_eq_2.currentText()
i_val = self.i_filt_2.text()
isig_val = self.i_sig_filt_2.text()
isig_table = ""
i_table = ""
ru_table = ""
rl_table = ""
if resoln_upper != "":
remove_resup = resolution > float(resoln_upper) #want to remove everything above, so only want indexes of those above
ru_table = "> %s" % resoln_upper
if resoln_lower != "":
remove_reslow = resolution < float(resoln_lower) #find values with a resolution less than resoln_lower; produces a boolean array marking whether this condition is met
rl_table = "> %s" % resoln_lower
#### resoln cutoff upper###
if len(remove_resup) > 0:
if len(remove_reslow) > 0:
ints = [i for i in range(0, len(Fm)) if remove_resup[i] == True] #remove_resup is a boolean array; keep only the indices where the condition is True
resulting_list = list(ints)
ints = []
ints = [i for i in range(0, len(Fm)) if remove_reslow[i] == True]
resulting_list.extend(x for x in ints if x not in resulting_list) #add values to resulting list if true in remove_reslow (and not already in resulting list)
ints = []
else:
ints = [i for i in range(0, len(Fm)) if remove_resup[i] == True]
resulting_list = list(ints)
ints = []
elif len(remove_reslow) > 0: #resolution cutoff lower
ints = [i for i in range(0, len(Fm)) if remove_reslow[i] == True]
resulting_list = list(ints)
ints = []
### check i cutoff##
if i_val != "":
if i_filt_eq == "<":
remove_i = Fm > float(i_val) #so selects all Fm values that are greater than the cutoff
elif i_filt_eq == ">":
#then check list
remove_i = Fm < float(i_val)
i_table = "%s %s" % (i_filt_eq,i_val)
if len(resulting_list) > 0:
#if the resulting list already has values in it, make sure no duplicate indices are added when the new indices are joined on
ints = []
ints = [i for i in range(0, len(Fm)) if remove_i[i] == True]
resulting_list.extend(x for x in ints if x not in resulting_list)
ints = []
else:
ints = [i for i in range(0, len(Fm)) if remove_i[i] == True]
resulting_list = list(ints)
####
fmsig = Fm/sigF
### i/s.u. cutoff
if isig_val != "":
if isig_filt_eq == "<":
remove_isig = fmsig > float(isig_val) #selects all Fm/sig(Fm) values that are greater than the cutoff
elif isig_filt_eq == ">":
remove_isig = fmsig < float(isig_val)
isig_table = "%s %s" % (isig_filt_eq,isig_val)
if len(resulting_list) > 0:
#if the resulting list already has values in it, make sure no duplicate indices are added when the new indices are joined on
ints = []
ints = [i for i in range(0, len(Fm)) if remove_isig[i] == True]
resulting_list.extend(x for x in ints if x not in resulting_list)
ints = []
else:
ints = []
ints = [i for i in range(0, len(Fm)) if remove_isig[i] == True]
resulting_list = list(ints)
ints = []
if len(resulting_list) > 0:
new_Fm = np.delete(Fm, resulting_list)
new_Fc =np.delete(Fc, resulting_list)
new_sigF = np.delete(sigF, resulting_list)
new_res = np.delete(resolution, resulting_list)
else:
new_Fm =Fm
new_Fc =Fc
new_sigF = sigF
new_res = resolution
cutoffs_list = []
noreflns = len(new_Fm)
return new_Fm, new_Fc, new_sigF, new_res,i_table,isig_table, ru_table, rl_table,noreflns
def drk_lim(self):
"""inserts plot limits to mirror DRKplot into normal probability plot tab"""
self.x_min.setText("-4")
self.x_max.setText("4")
self.y_min.setText("-4")
self.y_max.setText("4")
def clr_lim(self):
"""removes plot limits from drk plot tab"""
self.x_min.setText("")
self.x_max.setText("")
self.y_min.setText("")
self.y_max.setText("")
def clear_weights(self):
"""Clears all saved weights from weighting scheme table"""
self.tablemodel = ""
headers = ["a","b","goof","wr2","stop a","stop b","i cutoff","isig cutoff","resoln upper","resoln lower","no. of reflns","start a","start b","binning", "bin variance"]
self.tablemodel = QtGui.QStandardItemModel() #the table view needs a QStandardItemModel to display data
self.tablemodel.setHorizontalHeaderLabels(headers)
self.weighting_tableview.setModel(self.tablemodel)
def send_weights(self):
"""Sends selected weights from weighting scheme table to normal probability plot tab"""
#which row is selected?
selectedindexes = self.weighting_tableview.selectedIndexes()
#QTableView has no selectedRows() helper, so collect the row indices from the selected cells
row = []
for i in selectedindexes:
rowindex = i.row()
if rowindex not in row:
row.append(rowindex)
#making sure only one row has been selected
if len(row) == 1:
row_val = row[0]
#gets selected values
selected_a =str(self.tablemodel.item(row_val,0).text())
selected_b = str(self.tablemodel.item(row_val,1).text())
#sets a and b from tab one as selected values
self.a_edit.setText(str(selected_a))
self.b_edit.setText(str(selected_b))
self.c_edit.setText("0.0")
self.d_edit.setText("0.0")
self.e_edit.setText("0.0")
self.f_edit.setText("1/3")
elif len(row) == 0:
self.tab2_info.setText("Error: No row selected")
else:
self.tab2_info.setText("Error: More than one row selected")
if __name__ == '__main__':
#import sys
#from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
main = Main(1.0) #scale factor used in weight calculation, code assumes it is one. Change here if not.
main.show()
sys.exit(app.exec_())
| gpl-3.0 |
mit-crpg/openmc | tests/regression_tests/tally_slice_merge/test.py | 8 | 6593 | import hashlib
import itertools
import openmc
from tests.testing_harness import PyAPITestHarness
class TallySliceMergeTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Define nuclides and scores to add to both tallies
self.nuclides = ['U235', 'U238']
self.scores = ['fission', 'nu-fission']
# Define filters for energy and spatial domain
low_energy = openmc.EnergyFilter([0., 0.625])
high_energy = openmc.EnergyFilter([0.625, 20.e6])
merged_energies = low_energy.merge(high_energy)
cell_21 = openmc.CellFilter(21)
cell_27 = openmc.CellFilter(27)
distribcell_filter = openmc.DistribcellFilter(21)
mesh = openmc.RegularMesh(name='mesh')
mesh.dimension = [2, 2]
mesh.lower_left = [-50., -50.]
mesh.upper_right = [+50., +50.]
mesh_filter = openmc.MeshFilter(mesh)
self.cell_filters = [cell_21, cell_27]
self.energy_filters = [low_energy, high_energy]
# Initialize cell tallies with filters, nuclides and scores
tallies = []
for energy_filter in self.energy_filters:
for cell_filter in self.cell_filters:
for nuclide in self.nuclides:
for score in self.scores:
tally = openmc.Tally()
tally.estimator = 'tracklength'
tally.scores.append(score)
tally.nuclides.append(nuclide)
tally.filters.append(cell_filter)
tally.filters.append(energy_filter)
tallies.append(tally)
# Merge all cell tallies together
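# The loop below merges the tallies pairwise: the list is split in half and element i
# of the first half is merged with element i of the second half, so the 16 tallies
# built above (2 energy filters x 2 cell filters x 2 nuclides x 2 scores) collapse
# as 16 -> 8 -> 4 -> 2 -> 1 over four passes.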
while len(tallies) != 1:
halfway = len(tallies) // 2
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Specify a name for the tally
tallies[0].name = 'cell tally'
# Initialize a distribcell tally
distribcell_tally = openmc.Tally(name='distribcell tally')
distribcell_tally.estimator = 'tracklength'
distribcell_tally.filters = [distribcell_filter, merged_energies]
for score in self.scores:
distribcell_tally.scores.append(score)
for nuclide in self.nuclides:
distribcell_tally.nuclides.append(nuclide)
mesh_tally = openmc.Tally(name='mesh tally')
mesh_tally.estimator = 'tracklength'
mesh_tally.filters = [mesh_filter, merged_energies]
mesh_tally.scores = self.scores
mesh_tally.nuclides = self.nuclides
# Add tallies to a Tallies object
self._model.tallies = [tallies[0], distribcell_tally, mesh_tally]
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Extract the cell tally
tallies = [sp.get_tally(name='cell tally')]
# Slice the tallies by cell filter bins
cell_filter_prod = itertools.product(tallies, self.cell_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[type(tf[1])],
filter_bins=[(tf[1].bins[0],)]),
cell_filter_prod)
# Slice the tallies by energy filter bins
energy_filter_prod = itertools.product(tallies, self.energy_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[type(tf[1])],
filter_bins=[(tf[1].bins[0],)]),
energy_filter_prod)
# Slice the tallies by nuclide
nuclide_prod = itertools.product(tallies, self.nuclides)
tallies = map(lambda tn: tn[0].get_slice(nuclides=[tn[1]]), nuclide_prod)
# Slice the tallies by score
score_prod = itertools.product(tallies, self.scores)
tallies = map(lambda ts: ts[0].get_slice(scores=[ts[1]]), score_prod)
tallies = list(tallies)
# Initialize an output string
outstr = ''
# Append sliced Tally Pandas DataFrames to output string
for tally in tallies:
df = tally.get_pandas_dataframe()
outstr += df.to_string()
# Merge all tallies together
while len(tallies) != 1:
halfway = len(tallies) // 2
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Append merged Tally Pandas DataFrame to output string
df = tallies[0].get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Extract the distribcell tally
distribcell_tally = sp.get_tally(name='distribcell tally')
# Sum up a few subdomains from the distribcell tally
sum1 = distribcell_tally.summation(filter_type=openmc.DistribcellFilter,
filter_bins=[0, 100, 2000, 30000])
# Sum up a few subdomains from the distribcell tally
sum2 = distribcell_tally.summation(filter_type=openmc.DistribcellFilter,
filter_bins=[500, 5000, 50000])
# Merge the distribcell tally slices
merge_tally = sum1.merge(sum2)
# Append merged Tally Pandas DataFrame to output string
df = merge_tally.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Extract the mesh tally
mesh_tally = sp.get_tally(name='mesh tally')
# Sum up a few subdomains from the mesh tally
sum1 = mesh_tally.summation(filter_type=openmc.MeshFilter,
filter_bins=[(1, 1), (1, 2)])
# Sum up a few subdomains from the mesh tally
sum2 = mesh_tally.summation(filter_type=openmc.MeshFilter,
filter_bins=[(2, 1), (2, 2)])
# Merge the mesh tally slices
merge_tally = sum1.merge(sum2)
# Append merged Tally Pandas DataFrame to output string
df = merge_tally.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def test_tally_slice_merge():
harness = TallySliceMergeTestHarness('statepoint.10.h5')
harness.main()
| mit |
mlassnig/pilot | EventRanges.py | 3 | 4871 | #
import json
import os
import traceback
from pUtil import httpConnect, tolog
from EventRangesPandaProxy import downloadEventRangesPandaProxy, updateEventRangePandaProxy, updateEventRangesPandaProxy
def downloadEventRanges(jobId, jobsetID, taskID, pandaProxySecretKey=None, numRanges=10, url="https://pandaserver.cern.ch:25443/server/panda"):
""" Download event ranges from the Event Server """
try:
# url should be '%s:%s/server/panda' % (env['pshttpurl'], str(env['psport']))
if os.environ.has_key('EventRanges') and os.path.exists(os.environ['EventRanges']):
try:
with open(os.environ['EventRanges']) as json_file:
events = json.load(json_file)
os.rename(os.environ['EventRanges'], os.environ['EventRanges'] + ".loaded")
tolog(events)
return json.dumps(events)
except:
tolog('Failed to open event ranges json file: %s' % traceback.format_exc())
# Return the server response (instruction to AthenaMP)
# Note: the returned message is a string (of a list of dictionaries). If it needs to be converted back to a list, use json.loads(message)
tolog("Downloading new event ranges for jobId=%s, taskID=%s and jobsetID=%s" % (jobId, taskID, jobsetID))
if pandaProxySecretKey is not None and pandaProxySecretKey != "" :
return downloadEventRangesPandaProxy(jobId, jobsetID, pandaProxySecretKey)
# message = "[{u'lastEvent': 2, u'LFN': u'mu_E50_eta0-25.evgen.pool.root',u'eventRangeID': u'130-2068634812-21368-1-1', u'startEvent': 2, u'GUID':u'74DFB3ED-DAA7-E011-8954-001E4F3D9CB1'}]"
message = ""
node = {}
node['pandaID'] = jobId
node['jobsetID'] = jobsetID
node['taskID'] = taskID
node['nRanges'] = numRanges
# open connection
ret = httpConnect(node, url, path=os.getcwd(), mode="GETEVENTRANGES")
response = ret[1]
if ret[0]: # non-zero return code
message = "Failed to download event range - error code = %d" % (ret[0])
else:
message = response['eventRanges']
if message == "" or message == "[]":
message = "No more events"
return message
except Exception, e:
tolog("Failed to download event ranges: %s: %s" % (str(e), traceback.format_exc()))
return None
def updateEventRange(event_range_id, eventRangeList, jobId, pandaProxySecretKey, status='finished', os_bucket_id=-1, errorCode=None):
""" Update an list of event ranges on the Event Server """
# parameter eventRangeList is not used
try:
tolog("Updating an event range..")
if pandaProxySecretKey is not None and pandaProxySecretKey != "" :
return updateEventRangePandaProxy(event_range_id, eventRangeList, jobId, pandaProxySecretKey, status, os_bucket_id, errorCode)
eventrange = {'eventRangeID': event_range_id, 'eventStatus': status}
if os_bucket_id != -1:
eventrange['objstoreID'] = os_bucket_id
if errorCode:
eventrange['errorCode'] = errorCode
status, message = updateEventRanges([eventrange])
if status == 0:
message = json.loads(message)[0]
if str(message).lower() == 'true':
message = ""
return message
except:
tolog("Failed to update event range: %s" % traceback.format_exc())
return None
def updateEventRanges(event_ranges, pandaProxySecretKey=None, jobId=None, url="https://pandaserver.cern.ch:25443/server/panda", version=0):
""" Update an event range on the Event Server """
tolog("Updating event ranges...")
try:
if pandaProxySecretKey is not None and pandaProxySecretKey != "" :
return updateEventRangesPandaProxy(event_ranges, pandaProxySecretKey, jobId)
message = ""
# eventRanges = [{'eventRangeID': '4001396-1800223966-4426028-1-2', 'eventStatus':'running'}, {'eventRangeID': '4001396-1800223966-4426028-2-2','eventStatus':'running'}]
node={}
node['eventRanges']=json.dumps(event_ranges)
if version:
node['version'] = 1
# open connection
ret = httpConnect(node, url, path=os.getcwd(), mode="UPDATEEVENTRANGES")
# response = json.loads(ret[1])
status = ret[0]
if ret[0]: # non-zero return code
message = "Failed to update event range - error code = %d, error: %s" % (ret[0], ret[1])
else:
response = json.loads(json.dumps(ret[1]))
status = int(response['StatusCode'])
message = json.dumps(response['Returns'])
return status, message
except:
tolog("Failed to update event ranges: %s" % traceback.format_exc())
return -1, None
| apache-2.0 |
nguyentu1602/statsmodels | statsmodels/sandbox/regression/kernridgeregress_class.py | 39 | 7941 | '''Kernel Ridge Regression for local non-parametric regression'''
import numpy as np
from scipy import spatial as ssp
from numpy.testing import assert_equal
import matplotlib.pylab as plt
def plt_closeall(n=10):
'''close a number of open matplotlib windows'''
for i in range(n): plt.close()
def kernel_rbf(x,y,scale=1, **kwds):
#scale = kwds.get('scale',1)
dist = ssp.minkowski_distance_p(x[:,np.newaxis,:],y[np.newaxis,:,:],2)
return np.exp(-0.5/scale*(dist))
def kernel_euclid(x,y,p=2, **kwds):
return ssp.minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
class GaussProcess(object):
'''class to perform kernel ridge regression (gaussian process)
Warning: this class is memory intensive, it creates nobs x nobs distance
matrix and its inverse, where nobs is the number of rows (observations).
See sparse version for larger number of observations
Notes
-----
Todo:
* normalize multidimensional x array on demand, either by var or cov
* add confidence band
* automatic selection or proposal of smoothing parameters
Note: this is different from kernel smoothing regression,
see for example http://en.wikipedia.org/wiki/Kernel_smoother
In this version of the kernel ridge regression, the training points
are fitted exactly.
Needs a fast version for leave-one-out regression, for fitting each
observation on all the other points.
This version could be numerically improved for the calculation for many
different values of the ridge coefficient. See also the short summary by
Isabelle Guyon (ETHZ) in a manuscript KernelRidge.pdf
Needs verification and possibly additional statistical results or
summary statistics for interpretation, but this is a problem with
non-parametric, non-linear methods.
Reference
---------
Rasmussen, C.E. and C.K.I. Williams, 2006, Gaussian Processes for Machine
Learning, the MIT Press, www.GaussianProcess.org/gpal, chapter 2
a short summary of the kernel ridge regression is at
http://www.ics.uci.edu/~welling/teaching/KernelsICS273B/Kernel-Ridge.pdf
'''
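#Minimal usage sketch (illustrative values only):
#
# >>> x = np.linspace(0, 5, 50)[:, np.newaxis] # (nobs, k) with k=1
# >>> y = np.sin(x) + 0.1 * np.random.randn(50, 1)
# >>> gp = GaussProcess(x, y, kernel=kernel_rbf, scale=0.5, ridgecoeff=1e-6)
# >>> gp.yest # fitted values at the training points
# >>> gp.predict(np.array([[1.5], [2.5]])) # predictions at new points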
def __init__(self, x, y=None, kernel=kernel_rbf,
scale=0.5, ridgecoeff = 1e-10, **kwds ):
'''
Parameters
----------
x : 2d array (N,K)
data array of explanatory variables, columns represent variables
rows represent observations
y : 2d array (N,1) (optional)
endogenous variable that should be fitted or predicted
can alternatively be specified as parameter to fit method
kernel : function, default: kernel_rbf
kernel: (x1,x2)->kernel matrix is a function that takes two column
arrays as parameters and returns the kernel or distance matrix
scale : float (optional)
smoothing parameter for the rbf kernel
ridgecoeff : float (optional)
coefficient that is multiplied with the identity matrix in the
ridge regression
Notes
-----
After initialization, kernel matrix is calculated and if y is given
as parameter then also the linear regression parameter and the
fitted or estimated y values, yest, are calculated. yest is available
as an attribute in this case.
Both scale and the ridge coefficient smooth the fitted curve.
'''
self.x = x
self.kernel = kernel
self.scale = scale
self.ridgecoeff = ridgecoeff
self.distxsample = kernel(x,x,scale=scale)
self.Kinv = np.linalg.inv(self.distxsample +
np.eye(*self.distxsample.shape)*ridgecoeff)
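#kernel ridge closed form: with K = kernel(x, x), the coefficients are
# alpha = (K + ridgecoeff * I)^(-1) y
#and predictions are f(xnew) = kernel(xnew, x) dot alpha (see fit and predict below)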
if y is not None:
self.y = y
self.yest = self.fit(y)
def fit(self,y):
'''fit the training explanatory variables to a sample output variable'''
self.parest = np.dot(self.Kinv, y) #self.kernel(y,y,scale=self.scale))
yhat = np.dot(self.distxsample,self.parest)
return yhat
## print ds33.shape
## ds33_2 = kernel(x,x[::k,:],scale=scale)
## dsinv = np.linalg.inv(ds33+np.eye(*distxsample.shape)*ridgecoeff)
## B = np.dot(dsinv,y[::k,:])
def predict(self,x):
'''predict new y values for a given array of explanatory variables'''
self.xpredict = x
distxpredict = self.kernel(x, self.x, scale=self.scale)
self.ypredict = np.dot(distxpredict, self.parest)
return self.ypredict
def plot(self, y, plt=plt ):
'''some basic plots'''
#todo return proper graph handles
plt.figure();
plt.plot(self.x,self.y, 'bo-', self.x, self.yest, 'r.-')
plt.title('sample (training) points')
plt.figure()
plt.plot(self.xpredict,y,'bo-',self.xpredict,self.ypredict,'r.-')
plt.title('all points')
def example1():
m,k = 500,4
upper = 6
scale=10
xs1a = np.linspace(1,upper,m)[:,np.newaxis]
xs1 = xs1a*np.ones((1,4)) + 1/(1.0+np.exp(np.random.randn(m,k)))
xs1 /= np.std(xs1[::k,:],0) # normalize scale, could use cov to normalize
y1true = np.sum(np.sin(xs1)+np.sqrt(xs1),1)[:,np.newaxis]
y1 = y1true + 0.250 * np.random.randn(m,1)
stride = 2 #use only some points as training points, e.g. 2 means every 2nd
gp1 = GaussProcess(xs1[::stride,:],y1[::stride,:], kernel=kernel_euclid,
ridgecoeff=1e-10)
yhatr1 = gp1.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr1,'r.')
plt.title('euclid kernel: true y versus noisy y and estimated y')
plt.figure()
plt.plot(y1,'bo-',y1true,'go-',yhatr1,'r.-')
plt.title('euclid kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
gp2 = GaussProcess(xs1[::stride,:],y1[::stride,:], kernel=kernel_rbf,
scale=scale, ridgecoeff=1e-1)
yhatr2 = gp2.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr2,'r.')
plt.title('rbf kernel: true versus noisy (blue) and estimated (red) observations')
plt.figure()
plt.plot(y1,'bo-',y1true,'go-',yhatr2,'r.-')
plt.title('rbf kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
#gp2.plot(y1)
def example2(m=100, scale=0.01, stride=2):
#m,k = 100,1
upper = 6
xs1 = np.linspace(1,upper,m)[:,np.newaxis]
y1true = np.sum(np.sin(xs1**2),1)[:,np.newaxis]/xs1
y1 = y1true + 0.05*np.random.randn(m,1)
ridgecoeff = 1e-10
#stride = 2 #use only some points as trainig points e.g 2 means every 2nd
gp1 = GaussProcess(xs1[::stride,:],y1[::stride,:], kernel=kernel_euclid,
ridgecoeff=1e-10)
yhatr1 = gp1.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr1,'r.')
plt.title('euclid kernel: true versus noisy (blue) and estimated (red) observations')
plt.figure()
plt.plot(y1,'bo-',y1true,'go-',yhatr1,'r.-')
plt.title('euclid kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
gp2 = GaussProcess(xs1[::stride,:],y1[::stride,:], kernel=kernel_rbf,
scale=scale, ridgecoeff=1e-2)
yhatr2 = gp2.predict(xs1)
plt.figure()
plt.plot(y1true, y1,'bo',y1true, yhatr2,'r.')
plt.title('rbf kernel: true versus noisy (blue) and estimated (red) observations')
plt.figure()
plt.plot(y1,'bo-',y1true,'go-',yhatr2,'r.-')
plt.title('rbf kernel: true (green), noisy (blue) and estimated (red) '+
'observations')
#gp2.plot(y1)
if __name__ == '__main__':
example2()
#example2(m=1000, scale=0.01)
#example2(m=100, scale=0.5) # oversmoothing
#example2(m=2000, scale=0.005) # this looks good for rbf, zoom in
#example2(m=200, scale=0.01,stride=4)
example1()
#plt.show()
#plt_closeall() # use this to close the open figure windows
| bsd-3-clause |
mehdidc/scikit-learn | sklearn/cluster/birch.py | 18 | 22657 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import NotFittedError, check_is_fitted
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row, which is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
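# CSR layout: the nonzero values of row i are X_data[X_indptr[i]:X_indptr[i + 1]] and
# their column positions are X_indices[X_indptr[i]:X_indptr[i + 1]].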
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
next_leaf. Useful only if is_leaf is True, for retrieving
the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
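# Note: this equals ||centroid_j||^2 - 2 <centroid_j, x>, i.e. the squared distance
# to each candidate centroid up to the constant ||x||^2, which does not change the argmin.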
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
Tree. It is then clubbed together with the subcluster that has the
centroid closest to the new sample. This is done recursively till it
ends up at the subcluster of the leaf of the tree that has the closest centroid.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started.
branching_factor : int, default 50
Maximum number of CF subclusters in each node. If a new sample enters
such that the number of subclusters exceed the branching_factor then
the node has to be split. The corresponding parent also has to be
split and if the number of subclusters in the parent is greater than
the branching factor, then it has to be split recursively.
n_clusters : int, instance of sklearn.cluster model, default None
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. By default, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Maron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/p/jbirch/
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves: array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray, shape (n_samples,)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
| bsd-3-clause |
manashmndl/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
AI-Innovation/cs231n_ass1 | cs231n/classifiers/neural_net.py | 7 | 9686 | import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension of
N, a hidden layer dimension of H, and performs classification over C classes.
We train the network with a softmax loss function and L2 regularization on the
weight matrices. The network uses a ReLU nonlinearity after the first fully
connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values and
biases are initialized to zero. Weights and biases are stored in the
variable self.params, which is a dictionary with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] < C. This parameter is optional; if it
is not passed then we only return scores, and if it is passed then we
instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of training
samples.
- grads: Dictionary mapping parameter names to gradients of those parameters
with respect to the loss function; has the same keys as self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#############################################################################
# TODO: Perform the forward pass, computing the class scores for the input. #
# Store the result in the scores variable, which should be an array of #
# shape (N, C). #
#############################################################################
pass
#############################################################################
# END OF YOUR CODE #
#############################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
# Compute the loss
loss = None
#############################################################################
# TODO: Finish the forward pass, and compute the loss. This should include #
# both the data loss and L2 regularization for W1 and W2. Store the result #
# in the variable loss, which should be a scalar. Use the Softmax #
# classifier loss. So that your results match ours, multiply the #
# regularization loss by 0.5 #
#############################################################################
pass
#############################################################################
# END OF YOUR CODE #
#############################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
pass
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=1e-5, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
- y: A numpy array of shape (N,) giving training labels; y[i] = c means that
X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning rate
after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in xrange(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: Create a random minibatch of training data and labels, storing #
# them in X_batch and y_batch respectively. #
#########################################################################
pass
#########################################################################
# END OF YOUR CODE #
#########################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
#########################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params) #
# using stochastic gradient descent. You'll need to use the gradients #
# stored in the grads dictionary defined above. #
#########################################################################
pass
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print 'iteration %d / %d: loss %f' % (it, num_iters, loss)
# Every epoch, check train and val accuracy and decay learning rate.
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points to
classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each of
the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
to have class c, where 0 <= c < C.
"""
y_pred = None
###########################################################################
# TODO: Implement this function; it should be VERY simple! #
###########################################################################
pass
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
| mit |
robin-lai/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/units.py | 8 | 6188 | """
The classes here provide support for using custom classes with
matplotlib, e.g., those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, e.g., a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation,
rather a units implementation must provide a ConversionInterface, and
then register it with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects::
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
'convert value to a scalar or array'
return dates.date2num(value)
@staticmethod
def axisinfo(unit, axis):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
@staticmethod
def default_units(x, axis):
'return the default unit for x or None'
return 'date'
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.cbook import iterable, is_numlike
import numpy as np
class AxisInfo(object):
"""information to support default axis labeling and tick labeling, and
default limits"""
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None,
default_limits=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
default_limits: the default min, max of the axis if no data is present
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
self.default_limits = default_limits
class ConversionInterface(object):
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
@staticmethod
def axisinfo(unit, axis):
'return an units.AxisInfo instance for axis with the specified units'
return None
@staticmethod
def default_units(x, axis):
'return the default unit for x or None for the given axis'
return None
@staticmethod
def convert(obj, unit, axis):
"""
convert obj using unit for the specified axis. If obj is a sequence,
return the converted sequence. The output must be a sequence of scalars
that can be used by the numpy array layer
"""
return obj
@staticmethod
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain-ol unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self):
return None # nothing registered
#DISABLED idx = id(x)
#DISABLED cached = self._cached.get(idx)
#DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if isinstance(x, np.ndarray) and x.size:
xravel = x.ravel()
try:
# pass the first value of x that is not masked back to
# get_converter
if not np.all(xravel.mask):
# some elements are not masked
converter = self.get_converter(
xravel[np.argmin(xravel.mask)])
return converter
except AttributeError:
# not a masked_array
# Make sure we don't recurse forever -- it's possible for
# ndarray subclasses to continue to return subclasses and
# not ever return a non-subclass for a single element.
next_item = xravel[0]
if (not isinstance(next_item, np.ndarray) or
next_item.shape != x.shape):
converter = self.get_converter(next_item)
return converter
if converter is None and iterable(x):
for thisx in x:
# Make sure that recursing might actually lead to a solution,
# if we are just going to re-examine another item of the same
# kind, then do not look at it.
if classx and classx != getattr(thisx, '__class__', None):
converter = self.get_converter(thisx)
return converter
#DISABLED self._cached[idx] = converter
return converter
registry = Registry()
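
# The short sketch below is illustrative and not part of the original module: it shows how a
# converter registered in `registry` is looked up via `get_converter`, assuming matplotlib and
# its `dates` module are available; the `_DateConverter` class and the sample date are
# hypothetical stand-ins.
if __name__ == '__main__':
    import datetime
    import matplotlib.units as units
    import matplotlib.dates as dates

    class _DateConverter(units.ConversionInterface):
        @staticmethod
        def convert(value, unit, axis):
            # convert a date (or sequence of dates) to matplotlib's float representation
            return dates.date2num(value)

        @staticmethod
        def default_units(x, axis):
            return 'date'

    # register the converter for datetime.date, then let the registry find it,
    # even when the date is wrapped inside a plain list
    units.registry[datetime.date] = _DateConverter()
    print(units.registry.get_converter([datetime.date(2015, 1, 1)]))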
| apache-2.0 |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/frame/test_subclass.py | 15 | 9524 | # -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
import numpy as np
from pandas import DataFrame, Series, MultiIndex, Panel
import pandas as pd
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSubclassing(TestData):
def test_frame_subclassing_and_slicing(self):
# Subclass frame and ensure it returns the right class on slicing it
# In reference to PR 9632
class CustomSeries(Series):
@property
def _constructor(self):
return CustomSeries
def custom_series_function(self):
return 'OK'
class CustomDataFrame(DataFrame):
"""
Subclasses pandas DF, fills DF with simulation results, adds some
custom plotting functions.
"""
def __init__(self, *args, **kw):
super(CustomDataFrame, self).__init__(*args, **kw)
@property
def _constructor(self):
return CustomDataFrame
_constructor_sliced = CustomSeries
def custom_frame_function(self):
return 'OK'
data = {'col1': range(10),
'col2': range(10)}
cdf = CustomDataFrame(data)
# Did we get back our own DF class?
assert isinstance(cdf, CustomDataFrame)
# Do we get back our own Series class after selecting a column?
cdf_series = cdf.col1
assert isinstance(cdf_series, CustomSeries)
assert cdf_series.custom_series_function() == 'OK'
# Do we get back our own DF class after slicing row-wise?
cdf_rows = cdf[1:5]
assert isinstance(cdf_rows, CustomDataFrame)
assert cdf_rows.custom_frame_function() == 'OK'
# Make sure sliced part of multi-index frame is custom class
mcol = pd.MultiIndex.from_tuples([('A', 'A'), ('A', 'B')])
cdf_multi = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
assert isinstance(cdf_multi['A'], CustomDataFrame)
mcol = pd.MultiIndex.from_tuples([('A', ''), ('B', '')])
cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
assert isinstance(cdf_multi2['A'], CustomSeries)
def test_dataframe_metadata(self):
df = tm.SubclassedDataFrame({'X': [1, 2, 3], 'Y': [1, 2, 3]},
index=['a', 'b', 'c'])
df.testattr = 'XXX'
assert df.testattr == 'XXX'
assert df[['X']].testattr == 'XXX'
assert df.loc[['a', 'b'], :].testattr == 'XXX'
assert df.iloc[[0, 1], :].testattr == 'XXX'
# see gh-9776
assert df.iloc[0:1, :].testattr == 'XXX'
# see gh-10553
unpickled = tm.round_trip_pickle(df)
tm.assert_frame_equal(df, unpickled)
assert df._metadata == unpickled._metadata
assert df.testattr == unpickled.testattr
def test_indexing_sliced(self):
# GH 11559
df = tm.SubclassedDataFrame({'X': [1, 2, 3],
'Y': [4, 5, 6],
'Z': [7, 8, 9]},
index=['a', 'b', 'c'])
res = df.loc[:, 'X']
exp = tm.SubclassedSeries([1, 2, 3], index=list('abc'), name='X')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.iloc[:, 1]
exp = tm.SubclassedSeries([4, 5, 6], index=list('abc'), name='Y')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc[:, 'Z']
exp = tm.SubclassedSeries([7, 8, 9], index=list('abc'), name='Z')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc['a', :]
exp = tm.SubclassedSeries([1, 4, 7], index=list('XYZ'), name='a')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.iloc[1, :]
exp = tm.SubclassedSeries([2, 5, 8], index=list('XYZ'), name='b')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
res = df.loc['c', :]
exp = tm.SubclassedSeries([3, 6, 9], index=list('XYZ'), name='c')
tm.assert_series_equal(res, exp)
assert isinstance(res, tm.SubclassedSeries)
def test_to_panel_expanddim(self):
# GH 9762
with catch_warnings(record=True):
class SubclassedFrame(DataFrame):
@property
def _constructor_expanddim(self):
return SubclassedPanel
class SubclassedPanel(Panel):
pass
index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)])
df = SubclassedFrame({'X': [1, 2, 3], 'Y': [4, 5, 6]}, index=index)
result = df.to_panel()
assert isinstance(result, SubclassedPanel)
expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]],
items=['X', 'Y'], major_axis=[0],
minor_axis=[0, 1, 2],
dtype='int64')
tm.assert_panel_equal(result, expected)
def test_subclass_attr_err_propagation(self):
# GH 11808
class A(DataFrame):
@property
def bar(self):
return self.i_dont_exist
with tm.assert_raises_regex(AttributeError, '.*i_dont_exist.*'):
A().bar
def test_subclass_align(self):
# GH 12983
df1 = tm.SubclassedDataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
df2 = tm.SubclassedDataFrame({'c': [1, 2, 4],
'd': [1, 2, 4]}, index=list('ABD'))
res1, res2 = df1.align(df2, axis=0)
exp1 = tm.SubclassedDataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = tm.SubclassedDataFrame({'c': [1, 2, np.nan, 4, np.nan],
'd': [1, 2, np.nan, 4, np.nan]},
index=list('ABCDE'))
assert isinstance(res1, tm.SubclassedDataFrame)
tm.assert_frame_equal(res1, exp1)
assert isinstance(res2, tm.SubclassedDataFrame)
tm.assert_frame_equal(res2, exp2)
res1, res2 = df1.a.align(df2.c)
assert isinstance(res1, tm.SubclassedSeries)
tm.assert_series_equal(res1, exp1.a)
assert isinstance(res2, tm.SubclassedSeries)
tm.assert_series_equal(res2, exp2.c)
def test_subclass_align_combinations(self):
# GH 12983
df = tm.SubclassedDataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = tm.SubclassedSeries([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
# name is lost when
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
assert isinstance(res1, tm.SubclassedDataFrame)
tm.assert_frame_equal(res1, exp1)
assert isinstance(res2, tm.SubclassedSeries)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
assert isinstance(res1, tm.SubclassedSeries)
tm.assert_series_equal(res1, exp2)
assert isinstance(res2, tm.SubclassedDataFrame)
tm.assert_frame_equal(res2, exp1)
def test_subclass_iterrows(self):
# GH 13977
df = tm.SubclassedDataFrame({'a': [1]})
for i, row in df.iterrows():
assert isinstance(row, tm.SubclassedSeries)
tm.assert_series_equal(row, df.loc[i])
def test_subclass_sparse_slice(self):
rows = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]
ssdf = tm.SubclassedSparseDataFrame(rows)
ssdf.testattr = "testattr"
tm.assert_sp_frame_equal(ssdf.loc[:2],
tm.SubclassedSparseDataFrame(rows[:3]))
tm.assert_sp_frame_equal(ssdf.iloc[:2],
tm.SubclassedSparseDataFrame(rows[:2]))
tm.assert_sp_frame_equal(ssdf[:2],
tm.SubclassedSparseDataFrame(rows[:2]))
assert ssdf.loc[:2].testattr == "testattr"
assert ssdf.iloc[:2].testattr == "testattr"
assert ssdf[:2].testattr == "testattr"
tm.assert_sp_series_equal(ssdf.loc[1],
tm.SubclassedSparseSeries(rows[1]),
check_names=False)
tm.assert_sp_series_equal(ssdf.iloc[1],
tm.SubclassedSparseSeries(rows[1]),
check_names=False)
def test_subclass_sparse_transpose(self):
ossdf = tm.SubclassedSparseDataFrame([[1, 2, 3],
[4, 5, 6]])
essdf = tm.SubclassedSparseDataFrame([[1, 4],
[2, 5],
[3, 6]])
tm.assert_sp_frame_equal(ossdf.T, essdf)
| mit |
jimmbraddock/ns-3.20-ATN | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
stevenwudi/Kernelized_Correlation_Filter | KCFpy.py | 1 | 58037 | """
This is a python reimplementation of the open source tracker in
High-Speed Tracking with Kernelized Correlation Filters
Joao F. Henriques, Rui Caseiro, Pedro Martins, and Jorge Batista, tPAMI 2015
modified by Di Wu
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.misc import imresize
class KCFTracker:
def __init__(self, feature_type='raw', sub_feature_type='', sub_sub_feature_type='',
debug=False, gt_type='rect', load_model=False, vgglayer='',
model_path='./trained_models/CNN_Model_OBT100_multi_cnn_final.h5',
cnn_maximum=False):
"""
        feature_type: which features drive the correlation filter, e.g.
            'raw'  : raw pixel values
            'hog'  : HOG features
            'dsst' : raw pixels with an additional DSST scale filter
            'vgg', 'resnet50', 'vgg_rnn', 'cnn', 'multi_cnn' : deep CNN features
        sub_feature_type: optional modifier, e.g. 'dsst' (scale filter), 'grabcut' or 'class'
"""
# parameters according to the paper --
self.padding = 2.2 # extra area surrounding the target
self.lambda_value = 1e-4 # regularization
self.spatial_bandwidth_sigma_factor = 1 / float(16)
self.feature_type = feature_type
self.patch_size = []
self.output_sigma = []
self.cos_window = []
self.pos = []
self.x = []
self.alphaf = []
self.xf = []
self.yf = []
self.im_crop = []
self.response = []
self.target_out = []
self.target_sz = []
self.vert_delta = 0
self.horiz_delta = 0
# OBT dataset need extra definition
self.sub_feature_type = sub_feature_type
self.sub_sub_feature_type = sub_sub_feature_type
self.name = 'KCF' + feature_type
self.fps = -1
self.type = gt_type
self.res = []
self.im_sz = []
self.debug = debug # a flag indicating to plot the intermediate figures
self.first_patch_sz = []
self.first_target_sz = []
self.currentScaleFactor = 1
self.load_model = load_model
# following is set according to Table 2:
if self.feature_type == 'raw':
self.adaptation_rate = 0.075 # linear interpolation factor for adaptation
self.feature_bandwidth_sigma = 0.2
self.cell_size = 1
elif self.feature_type == 'hog':
self.adaptation_rate = 0.02 # linear interpolation factor for adaptation
self.bin_num = 31
self.cell_size = 4
self.feature_bandwidth_sigma = 0.5
elif self.feature_type == 'dsst':
            # this method is adapted from the paper: Martin Danelljan, Gustav Häger, Fahad Shahbaz Khan and Michael Felsberg,
            # "Accurate Scale Estimation for Robust Visual Tracking" (BMVC), 2014.
            # The project website is: http://www.cvl.isy.liu.se/research/objrec/visualtracking/index.html
self.adaptation_rate = 0.025 # linear interpolation factor for adaptation
self.feature_bandwidth_sigma = 0.2
self.cell_size = 1
self.scale_step = 1.02
self.nScales = 33
self.scaleFactors = self.scale_step **(np.ceil(self.nScales * 1.0/ 2) - range(1, self.nScales+1))
self.scale_window = np.hanning(self.nScales)
self.scale_sigma_factor = 1./4
self.scale_sigma = self.nScales / np.sqrt(self.nScales) * self.scale_sigma_factor
self.ys = np.exp(-0.5 * ((range(1, self.nScales+1) - np.ceil(self.nScales * 1.0 /2))**2) / self.scale_sigma**2)
self.ysf = np.fft.fft(self.ys)
self.min_scale_factor = []
self.max_scale_factor = []
self.xs = []
self.xsf = []
# we use linear kernel as in the BMVC2014 paper
self.new_sf_num = []
self.new_sf_den = []
self.scale_response = []
self.lambda_scale = 1e-2
elif self.feature_type == 'vgg' or self.feature_type == 'resnet50':
if self.feature_type == 'vgg':
from keras.applications.vgg19 import VGG19
from keras.models import Model
if vgglayer[:6]=='block2':
                    self.cell_size = 2  # block2 features are downsampled by 2 (block3 -> 4, block4 -> 8, block5 -> 16)
elif vgglayer[:6]=='block3':
self.cell_size = 4
elif vgglayer[:6] == 'block4':
self.cell_size = 8
elif vgglayer[:6] == 'block5':
self.cell_size = 16
else:
assert("not implemented")
self.base_model = VGG19(include_top=False, weights='imagenet')
self.extract_model = Model(input=self.base_model.input, output=self.base_model.get_layer('block3_conv4').output)
elif self.feature_type == 'resnet50':
from keras.applications.resnet50 import ResNet50
from keras.models import Model
self.base_model = ResNet50(weights='imagenet', include_top=False)
self.extract_model = Model(input=self.base_model.input,
output=self.base_model.get_layer('activation_10').output)
self.feature_bandwidth_sigma = 1
self.adaptation_rate = 0.01
if self.sub_feature_type == 'grabcut':
self.grabcut_mask_path = './figures/grabcut_masks/'
elif self.feature_type == 'vgg_rnn':
from keras.applications.vgg19 import VGG19
from keras.models import Model
self.base_model = VGG19(include_top=False, weights='imagenet')
self.extract_model = Model(input=self.base_model.input,
output=self.base_model.get_layer('block3_conv4').output)
# we first resize the response map to a size of 50*80 (store the resize scale)
# because average target size is 81 *52
self.resize_size = (240, 160)
self.cell_size = 4
self.response_size = [self.resize_size[0] / self.cell_size,
self.resize_size[1] / self.cell_size]
self.feature_bandwidth_sigma = 10
self.adaptation_rate = 0.01
grid_y = np.arange(self.response_size[0]) - np.floor(self.response_size[0] / 2)
grid_x = np.arange(self.response_size[1]) - np.floor(self.response_size[1] / 2)
# desired output (gaussian shaped), bandwidth proportional to target size
self.output_sigma = np.sqrt(np.prod(self.response_size)) * self.spatial_bandwidth_sigma_factor
rs, cs = np.meshgrid(grid_x, grid_y)
self.y = np.exp(-0.5 / self.output_sigma ** 2 * (rs ** 2 + cs ** 2))
self.yf = self.fft2(self.y)
# store pre-computed cosine window
self.cos_window = np.outer(np.hanning(self.yf.shape[0]), np.hanning(self.yf.shape[1]))
self.path_resize_size = np.multiply(self.yf.shape, (1 + self.padding))
self.cos_window_patch = np.outer(np.hanning(self.resize_size[0]), np.hanning(self.resize_size[1]))
# Embedding
if load_model:
from keras.models import load_model
self.lstm_model = load_model('rnn_translation_no_scale_freezconv.h5')
self.lstm_input = np.zeros(shape=(1,10,1,60,40)).astype(float)
elif self.feature_type == 'cnn':
from keras.applications.vgg19 import VGG19
from keras.models import Model
self.base_model = VGG19(include_top=False, weights='imagenet')
self.extract_model = Model(input=self.base_model.input,
output=self.base_model.get_layer('block3_conv4').output)
# we first resize the response map to a size of 50*80 (store the resize scale)
# because average target size is 81 *52
self.resize_size = (240, 160)
self.cell_size = 4
self.response_size = [self.resize_size[0] / self.cell_size,
self.resize_size[1] / self.cell_size]
self.feature_bandwidth_sigma = 10
self.adaptation_rate = 0.01
grid_y = np.arange(self.response_size[0]) - np.floor(self.response_size[0] / 2)
grid_x = np.arange(self.response_size[1]) - np.floor(self.response_size[1] / 2)
# desired output (gaussian shaped), bandwidth proportional to target size
self.output_sigma = np.sqrt(np.prod(self.response_size)) * self.spatial_bandwidth_sigma_factor
rs, cs = np.meshgrid(grid_x, grid_y)
y = np.exp(-0.5 / self.output_sigma ** 2 * (rs ** 2 + cs ** 2))
self.yf = self.fft2(y)
# store pre-computed cosine window
self.cos_window = np.outer(np.hanning(self.yf.shape[0]), np.hanning(self.yf.shape[1]))
self.path_resize_size = np.multiply(self.yf.shape, (1 + self.padding))
self.cos_window_patch = np.outer(np.hanning(self.resize_size[0]), np.hanning(self.resize_size[1]))
# Embedding
if load_model:
from keras.models import load_model
self.cnn_model = load_model('cnn_translation_scale_combine.h5')
elif self.feature_type == 'multi_cnn':
from keras.applications.vgg19 import VGG19
from keras.models import Model
import theano
self.base_model = VGG19(include_top=False, weights='imagenet')
self.extract_model_function = theano.function([self.base_model.input],
[self.base_model.get_layer('block1_conv2').output,
self.base_model.get_layer('block2_conv2').output,
self.base_model.get_layer('block3_conv4').output,
self.base_model.get_layer('block4_conv4').output,
self.base_model.get_layer('block5_conv4').output
], allow_input_downcast=True)
# we first resize all the response maps to a size of 40*60 (store the resize scale)
# because average target size is 81 *52
self.resize_size = (240, 160)
self.cell_size = 4
self.response_size = [self.resize_size[0] / self.cell_size,
self.resize_size[1] / self.cell_size]
self.feature_bandwidth_sigma = 0.2
self.adaptation_rate = 0.0025
# store pre-computed cosine window, here is a multiscale CNN, here we have 5 layers cnn:
self.cos_window = []
self.y = []
self.yf = []
self.response_all = []
self.max_list = []
for i in range(5):
cos_wind_sz = np.divide(self.resize_size, 2**i)
self.cos_window.append(np.outer(np.hanning(cos_wind_sz[0]), np.hanning(cos_wind_sz[1])))
grid_y = np.arange(cos_wind_sz[0]) - np.floor(cos_wind_sz[0] / 2)
grid_x = np.arange(cos_wind_sz[1]) - np.floor(cos_wind_sz[1] / 2)
# desired output (gaussian shaped), bandwidth proportional to target size
output_sigma = np.sqrt(np.prod(cos_wind_sz)) * self.spatial_bandwidth_sigma_factor
rs, cs = np.meshgrid(grid_x, grid_y)
y = np.exp(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))
self.y.append(y)
self.yf.append(self.fft2(y))
# self.path_resize_size = np.multiply(self.yf.shape, (1 + self.padding))
# self.cos_window_patch = np.outer(np.hanning(self.resize_size[0]), np.hanning(self.resize_size[1]))
# Embedding
if load_model:
from keras.models import load_model
if self.sub_feature_type=='class':
self.multi_cnn_model = load_model('./models/CNN_Model_OBT100_multi_cnn_best_valid_cnn_cifar_small_batchnormalisation_class_scale.h5')
from models.DataLoader import DataLoader
loader = DataLoader(batch_size=32, filename="./data/OBT100_new_multi_cnn%d.hdf5")
self.translation_value = np.asarray(loader.translation_value)
self.scale_value = np.asarray(loader.scale_value)
else:
self.multi_cnn_model = load_model(model_path)
self.cnn_maximum = cnn_maximum
if self.sub_feature_type=='dsst':
            # this method is adapted from the paper: Martin Danelljan, Gustav Häger, Fahad Shahbaz Khan and Michael Felsberg,
            # "Accurate Scale Estimation for Robust Visual Tracking" (BMVC), 2014.
            # The project website is: http://www.cvl.isy.liu.se/research/objrec/visualtracking/index.html
self.scale_step = 1.01
self.nScales = 33
self.scaleFactors = self.scale_step ** (np.ceil(self.nScales * 1.0 / 2) - range(1, self.nScales + 1))
self.scale_window = np.hanning(self.nScales)
self.scale_sigma_factor = 1. / 4
self.scale_sigma = self.nScales / np.sqrt(self.nScales) * self.scale_sigma_factor
self.ys = np.exp(
-0.5 * ((range(1, self.nScales + 1) - np.ceil(self.nScales * 1.0 / 2)) ** 2) / self.scale_sigma ** 2)
self.ysf = np.fft.fft(self.ys)
self.min_scale_factor = []
self.max_scale_factor = []
self.xs = []
self.xsf = []
self.sf_num = []
self.sf_den = []
# we use linear kernel as in the BMVC2014 paper
self.new_sf_num = []
self.new_sf_den = []
self.scale_response = []
self.lambda_scale = 1e-2
self.adaptation_rate_scale = 0.005
if sub_sub_feature_type == 'adapted_lr':
self.sub_sub_feature_type = sub_sub_feature_type
self.acc_time = 5
self.loss = np.zeros(shape=(self.acc_time, 5))
self.loss_mean = np.zeros(shape=(self.acc_time, 5))
self.loss_std = np.zeros(shape=(self.acc_time, 5))
self.adaptation_rate_range = [0.005, 0.0]
self.adaptation_rate_scale_range = [0.005, 0.00]
self.adaptation_rate = self.adaptation_rate_range[0]
self.adaptation_rate_scale = self.adaptation_rate_scale_range[0]
self.stability = 1
if self.sub_feature_type:
self.name += '_'+sub_feature_type
self.feature_correlation = None
if self.sub_sub_feature_type:
self.name += '_' + sub_sub_feature_type
if self.cnn_maximum:
self.name += '_cnn_maximum'
def train(self, im, init_rect, seqname):
"""
        :param im: image should be of 3 dimension: M*N*C
        :param init_rect: initial bounding box in OBT format [x, y, width, height]
        :param seqname: sequence name, used to load the grabcut mask when sub_feature_type == 'grabcut'
"""
self.pos = [init_rect[1]+init_rect[3]/2., init_rect[0]+init_rect[2]/2.]
self.res.append(init_rect)
# for scaling, we always need to set it to 1
self.currentScaleFactor = 1
        # OBT ground-truth rects are (x, y, w, h); flip (w, h) to (h, w) for target_sz
self.target_sz = np.asarray(init_rect[2:])
self.target_sz = self.target_sz[::-1]
self.first_target_sz = self.target_sz # because we might introduce the scale changes in the detection
# desired padded input, proportional to input target size
self.patch_size = np.floor(self.target_sz * (1 + self.padding))
self.first_patch_sz = np.array(self.patch_size).astype(int) # because we might introduce the scale changes in the detection
# desired output (gaussian shaped), bandwidth proportional to target size
self.output_sigma = np.sqrt(np.prod(self.target_sz)) * self.spatial_bandwidth_sigma_factor
grid_y = np.arange(np.floor(self.patch_size[0]/self.cell_size)) - np.floor(self.patch_size[0]/(2*self.cell_size))
grid_x = np.arange(np.floor(self.patch_size[1]/self.cell_size)) - np.floor(self.patch_size[1]/(2*self.cell_size))
if self.feature_type == 'resnet50':
# this is an odd tweak to make the dimension uniform:
if np.mod(self.patch_size[0], 2) == 0:
grid_y = np.arange(np.floor(self.patch_size[0] / self.cell_size)-1) - np.floor(
self.patch_size[0] / (2 * self.cell_size)) - 0.5
if np.mod(self.patch_size[1], 2) == 0:
grid_x = np.arange(np.floor(self.patch_size[1] / self.cell_size)-1) - np.floor(
self.patch_size[1] / (2 * self.cell_size)) - 0.5
if self.feature_type == 'vgg_rnn' or self.feature_type == 'cnn':
grid_y = np.arange(self.response_size[0]) - np.floor(self.response_size[0]/2)
grid_x = np.arange(self.response_size[1]) - np.floor(self.response_size[1]/2)
if not self.feature_type == 'multi_cnn':
rs, cs = np.meshgrid(grid_x, grid_y)
self.y = np.exp(-0.5 / self.output_sigma ** 2 * (rs ** 2 + cs ** 2))
self.yf = self.fft2(self.y)
# store pre-computed cosine window
self.cos_window = np.outer(np.hanning(self.yf.shape[0]), np.hanning(self.yf.shape[1]))
# extract and pre-process subwindow
if self.feature_type == 'raw' and im.shape[0] == 3:
im = im.transpose(1, 2, 0)/255.
self.im_sz = im.shape
elif self.feature_type == 'dsst':
im = im.transpose(1, 2, 0) / 255.
self.im_sz = im.shape
self.min_scale_factor = self.scale_step **(np.ceil(np.log(max(5. / self.patch_size)) / np.log(self.scale_step)))
self.max_scale_factor = self.scale_step **(np.log(min(np.array(self.im_sz[:2]).astype(float) / self.target_sz)) / np.log(self.scale_step))
self.xs = self.get_scale_sample(im, self.currentScaleFactor * self.scaleFactors)
self.xsf = np.fft.fftn(self.xs, axes=[0])
# we use linear kernel as in the BMVC2014 paper
self.new_sf_num = np.multiply(self.ysf[:, None], np.conj(self.xsf))
self.new_sf_den = np.real(np.sum(np.multiply(self.xsf, np.conj(self.xsf)), axis=1))
elif self.feature_type == 'vgg' or self.feature_type == 'resnet50' or \
self.feature_type == 'vgg_rnn' or self.feature_type == 'cnn' or self.feature_type == 'multi_cnn':
self.im_sz = im.shape[1:]
self.im_crop = self.get_subwindow(im, self.pos, self.patch_size)
self.x = self.get_features()
self.xf = self.fft2(self.x)
if self.sub_feature_type == 'grabcut':
import matplotlib.image as mpimg
from skimage.transform import resize
img_grabcut = mpimg.imread(self.grabcut_mask_path+seqname+".png")
grabcut_shape = self.x.shape[:2]
img_grabcut = resize(img_grabcut, grabcut_shape)
corr = np.multiply(self.x, img_grabcut[:,:,None])
corr = np.sum(np.sum(corr, axis=0), axis=0)
# we compute the correlation of a filter within a layer to its features
self.feature_correlation = (corr - corr.min()) / (corr.max() - corr.min())
if self.feature_type == 'multi_cnn':
# multi_cnn will render the models to be of a list
self.alphaf = []
for i in range(len(self.x)):
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, self.xf[i], self.x[i])
self.alphaf.append(np.divide(self.yf[i], self.fft2(k) + self.lambda_value))
if self.sub_feature_type == 'dsst':
self.min_scale_factor = self.scale_step ** (np.ceil(np.log(max(5. / self.patch_size)) / np.log(self.scale_step)))
self.max_scale_factor = self.scale_step ** (np.log(min(np.array(self.im_sz[:2]).astype(float) / self.target_sz)) / np.log(self.scale_step))
self.xs = self.get_scale_sample(im, self.currentScaleFactor * self.scaleFactors)
self.xsf = np.fft.fftn(self.xs, axes=[0])
# we use linear kernel as in the BMVC2014 paper
self.sf_num = np.multiply(self.ysf[:, None], np.conj(self.xsf))
self.sf_den = np.real(np.sum(np.multiply(self.xsf, np.conj(self.xsf)), axis=1))
else:
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, self.xf, self.x)
self.alphaf = np.divide(self.yf, self.fft2(k) + self.lambda_value)
def detect(self, im, frame):
"""
        Note: the plain KCF detector keeps the target scale fixed; the target size is only
        updated when the 'dsst' or 'class' sub-feature branches are active.
        :param im: image should be of 3 dimension: M*N*C
        :param frame: frame index within the sequence
        :return: the estimated target centre position
"""
# extract and pre-process subwindow
if self.feature_type == 'raw' and im.shape[0] == 3:
im = im.transpose(1, 2, 0)/255.
elif self.feature_type == 'dsst':
im = im.transpose(1, 2, 0) / 255.
self.im_sz = im.shape
        # Quote from the BMVC 2014 paper (Danelljan et al.):
        # "In visual tracking scenarios, the scale difference between two frames is typically smaller compared to the
        # translation. Therefore, we first apply the translation filter hf given a new frame, afterwards the scale
        # filter hs is applied at the new target location."
self.im_crop = self.get_subwindow(im, self.pos, self.patch_size)
z = self.get_features()
zf = self.fft2(z)
if not self.feature_type == 'multi_cnn':
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, self.xf, self.x, zf, z)
kf = self.fft2(k)
self.response = np.real(np.fft.ifft2(np.multiply(self.alphaf, kf)))
else:
self.response = []
for i in range(len(z)):
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, self.xf[i], self.x[i], zf[i], z[i])
kf = self.fft2(k)
self.response.append(np.real(np.fft.ifft2(np.multiply(self.alphaf[i], kf))))
if self.feature_type == 'raw' or self.feature_type == 'vgg':
# target location is at the maximum response. We must take into account the fact that, if
# the target doesn't move, the peak will appear at the top-left corner, not at the centre
            # (this is discussed in the paper Fig. 6). The response map wraps around cyclically.
v_centre, h_centre = np.unravel_index(self.response.argmax(), self.response.shape)
self.vert_delta, self.horiz_delta = [v_centre - self.response.shape[0] / 2,
h_centre - self.response.shape[1] / 2]
self.pos = self.pos + np.dot(self.cell_size, [self.vert_delta, self.horiz_delta])
elif self.feature_type == 'vgg_rnn':
# We need to normalise it (because our training did so):
response = self.response
response = (response - response.min()) / (response.max() - response.min())
response = np.expand_dims(np.expand_dims(response, axis=0), axis=0)
if frame <= 10:
self.lstm_input[0, frame-1, :, :, :] = response
predicted_output_all = self.lstm_model.predict(self.lstm_input, batch_size=1)
predicted_output = predicted_output_all[0, frame-1,:2]
else:
# we always shift the frame to the left and have the final output prediction
self.lstm_input[0, 0:9, :, :, :] = self.lstm_input[0, 1:10, :, :, :]
self.lstm_input[0, 9, :, :, :] = response
predicted_output_all = self.lstm_model.predict(self.lstm_input, batch_size=1)
predicted_output = predicted_output_all[0, 9, :2]
# target location is at the maximum response. We must take into account the fact that, if
# the target doesn't move, the peak will appear at the top-left corner, not at the centre
            # (this is discussed in the paper Fig. 6). The response map wraps around cyclically.
v_centre, h_centre = np.unravel_index(self.response.argmax(), self.response.shape)
self.vert_delta, self.horiz_delta = [v_centre - self.response.shape[0] / 2,
h_centre - self.response.shape[1] / 2]
self.pos_old = [
self.pos[1] + self.patch_size[1] * 1.0 / self.resize_size[1] * self.horiz_delta - self.target_sz[
1] / 2.,
self.pos[0] + self.patch_size[0] * 1.0 / self.resize_size[0] * self.vert_delta - self.target_sz[
0] / 2., ]
self.pos = [self.pos[0] + self.target_sz[0] * predicted_output[0],
self.pos[1] + self.target_sz[1] * predicted_output[1]]
self.pos = [max(self.target_sz[0] / 2, min(self.pos[0], self.im_sz[0] - self.target_sz[0] / 2)),
max(self.target_sz[1] / 2, min(self.pos[1], self.im_sz[1] - self.target_sz[1] / 2))]
elif self.feature_type == 'cnn':
# We need to normalise it (because our training did so):
response = self.response
response = (response-response.min())/(response.max()-response.min())
response = np.expand_dims(np.expand_dims(response, axis=0), axis=0)
predicted_output = self.cnn_model.predict(response, batch_size=1)
# target location is at the maximum response. We must take into account the fact that, if
# the target doesn't move, the peak will appear at the top-left corner, not at the centre
            # (this is discussed in the paper Fig. 6). The response map wraps around cyclically.
v_centre, h_centre = np.unravel_index(self.response.argmax(), self.response.shape)
self.vert_delta, self.horiz_delta = [v_centre - self.response.shape[0]/2, h_centre - self.response.shape[1]/2]
self.pos_old = [self.pos[1] + self.patch_size[1] * 1.0 / self.resize_size[1] * self.horiz_delta - self.target_sz[1] / 2.,
self.pos[0] + self.patch_size[0] * 1.0 / self.resize_size[0] * self.vert_delta - self.target_sz[0] / 2.,]
self.pos = [self.pos[0] + self.target_sz[0] * predicted_output[0][0],
self.pos[1] + self.target_sz[1] * predicted_output[0][1]]
self.pos = [max(self.target_sz[0] / 2, min(self.pos[0], self.im_sz[0] - self.target_sz[0] / 2)),
max(self.target_sz[1] / 2, min(self.pos[1], self.im_sz[1] - self.target_sz[1] / 2))]
elif self.feature_type == 'multi_cnn':
response_all = np.zeros(shape=(5, self.resize_size[0], self.resize_size[1]))
self.max_list = [np.max(x) for x in self.response]
if self.sub_sub_feature_type == 'adapted_lr':
loss_idx = np.mod(frame, self.acc_time)
self.loss[loss_idx] = 1 - np.asarray(self.max_list)
self.loss_mean = np.mean(self.loss, axis=0)
self.loss_std = np.std(self.loss, axis=0)
if frame > self.acc_time:
stability_coeff = np.abs(self.loss[loss_idx]-self.loss_mean) / self.loss_std
self.stability = np.mean(np.exp(-stability_coeff))
# stability value is small(0), object is stable, adaptive learning rate is increased to maximum
# stability value is big(1), object is not stable, adaptive learning rate is decreased to minimum
self.adaptation_rate = max(0, self.adaptation_rate_range[1] + \
self.stability*(self.adaptation_rate_range[0] - self.adaptation_rate_range[1]))
self.adaptation_rate_scale = max(0, self.adaptation_rate_scale_range[1] + \
self.stability*(self.adaptation_rate_scale_range[0] - self.adaptation_rate_scale_range[1]))
for i in range(len(self.response)):
response_all[i, :, :] = imresize(self.response[i], size=self.resize_size)
if self.sub_feature_type == 'class' or self.cnn_maximum:
response_all[i, :, :] = np.multiply(response_all[i, :, :], self.max_list[i])
response_all = response_all.astype('float32') / 255. - 0.5
self.response_all = response_all
response_all = np.expand_dims(response_all, axis=0)
predicted_output = self.multi_cnn_model.predict(response_all, batch_size=1)
if self.sub_feature_type=='class':
translational_x = np.dot(predicted_output[0], self.translation_value)
translational_y = np.dot(predicted_output[1], self.translation_value)
scale_change = np.dot(predicted_output[2], self.scale_value)
# translational_x = self.translation_value[np.argmax(predicted_output[0])]
# translational_y = self.translation_value[np.argmax(predicted_output[1])]
# scale_change = self.scale_value[np.argmax(predicted_output[2])]
# calculate the new target size
self.target_sz = np.divide(self.target_sz, scale_change)
                # we also require the target size to be smaller than the image size divided by the padding
self.target_sz = [min(self.im_sz[0], self.target_sz[0]), min(self.im_sz[1], self.target_sz[1])]
self.patch_size = np.multiply(self.target_sz, (1 + self.padding))
self.vert_delta, self.horiz_delta = \
[self.target_sz[0] * translational_x, self.target_sz[1] * translational_y]
self.pos = [self.pos[0] + self.target_sz[0] * translational_x,
self.pos[1] + self.target_sz[1] * translational_y]
self.pos = [max(self.target_sz[0] / 2, min(self.pos[0], self.im_sz[0] - self.target_sz[0] / 2)),
max(self.target_sz[1] / 2, min(self.pos[1], self.im_sz[1] - self.target_sz[1] / 2))]
else:
##################################################################################
# we need to train the tracker again for scaling, it's almost the replicate of train
##################################################################################
# target location is at the maximum response. We must take into account the fact that, if
# the target doesn't move, the peak will appear at the top-left corner, not at the centre
                # (this is discussed in the paper Fig. 6). The response map wraps around cyclically.
self.vert_delta, self.horiz_delta = \
[self.target_sz[0] * predicted_output[0][0], self.target_sz[1] * predicted_output[0][1]]
self.pos = [self.pos[0] + self.target_sz[0] * predicted_output[0][0],
self.pos[1] + self.target_sz[1] * predicted_output[0][1]]
self.pos = [max(self.target_sz[0] / 2, min(self.pos[0], self.im_sz[0] - self.target_sz[0] / 2)),
max(self.target_sz[1] / 2, min(self.pos[1], self.im_sz[1] - self.target_sz[1] / 2))]
##################################################################################
# we need to train the tracker again for scaling, it's almost the replicate of train
##################################################################################
# calculate the new target size
# scale_change = predicted_output[0][2:]
# self.target_sz = np.multiply(self.target_sz, scale_change.mean())
                # we also require the target size to be smaller than the image size divided by the padding
##################################################################################
# we need to train the tracker again here, it's almost the replicate of train
##################################################################################
self.im_crop = self.get_subwindow(im, self.pos, self.patch_size)
x_new = self.get_features()
xf_new = self.fft2(x_new)
if self.feature_type == 'multi_cnn':
for i in range(len(x_new)):
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, xf_new[i], x_new[i])
kf = self.fft2(k)
alphaf_new = np.divide(self.yf[i], kf + self.lambda_value)
self.x[i] = (1 - self.adaptation_rate) * self.x[i] + self.adaptation_rate * x_new[i]
self.xf[i] = (1 - self.adaptation_rate) * self.xf[i] + self.adaptation_rate * xf_new[i]
self.alphaf[i] = (1 - self.adaptation_rate) * self.alphaf[i] + self.adaptation_rate * alphaf_new
if self.sub_feature_type == 'dsst':
self.xs = self.get_scale_sample(im, self.currentScaleFactor * self.scaleFactors)
self.xsf = np.fft.fftn(self.xs, axes=[0])
# calculate the correlation response of the scale filter
scale_response_fft = np.divide(np.multiply(self.sf_num, self.xsf),
(self.sf_den[:, None] + self.lambda_scale))
                scale_response = np.real(np.fft.ifftn(np.sum(scale_response_fft, axis=1)))
                recovered_scale = np.argmax(scale_response)
# update the scale
self.currentScaleFactor *= self.scaleFactors[recovered_scale]
if self.currentScaleFactor < self.min_scale_factor:
self.currentScaleFactor = self.min_scale_factor
elif self.currentScaleFactor > self.max_scale_factor:
self.currentScaleFactor = self.max_scale_factor
else:
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, xf_new, x_new)
kf = self.fft2(k)
alphaf_new = np.divide(self.yf, kf + self.lambda_value)
self.x = (1 - self.adaptation_rate) * self.x + self.adaptation_rate * x_new
self.xf = (1 - self.adaptation_rate) * self.xf + self.adaptation_rate * xf_new
self.alphaf = (1 - self.adaptation_rate) * self.alphaf + self.adaptation_rate * alphaf_new
# we also require the bounding box to be within the image boundary
self.res.append([min(self.im_sz[1] - self.target_sz[1], max(0, self.pos[1] - self.target_sz[1] / 2.)),
min(self.im_sz[0] - self.target_sz[0], max(0, self.pos[0] - self.target_sz[0] / 2.)),
self.target_sz[1], self.target_sz[0]])
if self.sub_feature_type == 'dsst':
self.xs = self.get_scale_sample(im, self.currentScaleFactor * self.scaleFactors)
self.xsf = np.fft.fftn(self.xs, axes=[0])
# we use linear kernel as in the BMVC2014 paper
new_sf_num = np.multiply(self.ysf[:, None], np.conj(self.xsf))
new_sf_den = np.real(np.sum(np.multiply(self.xsf, np.conj(self.xsf)), axis=1))
self.sf_num = (1 - self.adaptation_rate_scale) * self.sf_num + self.adaptation_rate * new_sf_num
self.sf_den = (1 - self.adaptation_rate_scale) * self.sf_den + self.adaptation_rate * new_sf_den
# we only update the target size here.
self.target_sz = np.multiply(self.currentScaleFactor, self.first_target_sz)
self.patch_size = np.multiply(self.target_sz, (1 + self.padding))
return self.pos
def dense_gauss_kernel(self, sigma, xf, x, zf=None, z=None):
"""
Gaussian Kernel with dense sampling.
        Evaluates a gaussian kernel with bandwidth SIGMA for all displacements
        between input images X and Z, which must both be MxN. They must also
        be periodic (i.e., pre-processed with a cosine window). The result is
        an MxN map of responses.

        If X and Z are the same, omit the last two parameters to re-use some
        values, which is faster. The kernel is computed as

            k = exp(-max(0, ||x||^2 + ||z||^2 - 2 * F^-1{sum_c zf_c * conj(xf_c)}) / (sigma^2 * N))

        :param sigma: feature bandwidth sigma
        :param xf: FFT2 of the template features x
        :param x: template features
        :param zf: FFT2 of the test features z (defaults to xf for auto-correlation)
        :param z: test features; if None, the auto-correlation of x is computed
        :return: MxN map of kernel responses
"""
N = xf.shape[0]*xf.shape[1]
xx = np.dot(x.flatten().transpose(), x.flatten()) # squared norm of x
if zf is None:
# auto-correlation of x
zf = xf
zz = xx
else:
zz = np.dot(z.flatten().transpose(), z.flatten()) # squared norm of y
xyf = np.multiply(zf, np.conj(xf))
if self.feature_type == 'raw' or self.feature_type == 'dsst':
if len(xyf.shape) == 3:
xyf_ifft = np.fft.ifft2(np.sum(xyf, axis=2))
elif len(xyf.shape) == 2:
xyf_ifft = np.fft.ifft2(xyf)
# elif len(xyf.shape) == 4:
# xyf_ifft = np.fft.ifft2(np.sum(xyf, axis=3))
elif self.feature_type == 'hog':
xyf_ifft = np.fft.ifft2(np.sum(xyf, axis=2))
elif self.feature_type == 'vgg' or self.feature_type == 'resnet50' \
or self.feature_type == 'vgg_rnn' or self.feature_type == 'cnn' or self.feature_type =='multi_cnn':
xyf_ifft = np.fft.ifft2(np.sum(xyf, axis=2))
row_shift, col_shift = np.floor(np.array(xyf_ifft.shape) / 2).astype(int)
xy_complex = np.roll(xyf_ifft, row_shift, axis=0)
xy_complex = np.roll(xy_complex, col_shift, axis=1)
c = np.real(xy_complex)
d = np.real(xx) + np.real(zz) - 2 * c
k = np.exp(-1. / sigma**2 * np.maximum(0, d) / N)
return k
def get_subwindow(self, im, pos, sz):
"""
Obtain sub-window from image, with replication-padding.
Returns sub-window of image IM centered at POS ([y, x] coordinates),
with size SZ ([height, width]). If any pixels are outside of the image,
they will replicate the values at the borders.
        Unlike the original MATLAB implementation, normalisation and the cosine
        window are applied later in get_features(), not in this function.
"""
if np.isscalar(sz): # square sub-window
sz = [sz, sz]
ys = np.floor(pos[0]) + np.arange(sz[0], dtype=int) - np.floor(sz[0] / 2)
xs = np.floor(pos[1]) + np.arange(sz[1], dtype=int) - np.floor(sz[1] / 2)
ys = ys.astype(int)
xs = xs.astype(int)
# check for out-of-bounds coordinates and set them to the values at the borders
ys[ys < 0] = 0
ys[ys >= self.im_sz[0]] = self.im_sz[0] - 1
xs[xs < 0] = 0
xs[xs >= self.im_sz[1]] = self.im_sz[1] - 1
# extract image
if self.feature_type == 'raw' or self.feature_type == 'dsst':
out = im[np.ix_(ys, xs)]
            # introduce scaling: the cropped patch is resized to match the first frame's patch size
if np.all(self.first_patch_sz == out.shape[:2]):
return out
else:
out = imresize(out, self.first_patch_sz)
return out / 255.
elif self.feature_type == 'vgg' or self.feature_type == 'resnet50' or \
self.feature_type == 'vgg_rnn' or self.feature_type == 'cnn' or self.feature_type == 'multi_cnn':
c = np.array(range(3))
out = im[np.ix_(c, ys, xs)]
# if self.feature_type == 'vgg_rnn' or self.feature_type == 'cnn':
# from keras.applications.vgg19 import preprocess_input
# x = imresize(out.copy(), self.resize_size)
# out = np.multiply(x, self.cos_window_patch[:, :, None])
return out
def fft2(self, x):
"""
FFT transform of the first 2 dimension
:param x: M*N*C the first two dimensions are used for Fast Fourier Transform
:return: M*N*C the FFT2 of the first two dimension
"""
if type(x) == list:
x = [np.fft.fft2(f, axes=(0,1)) for f in x]
return x
else:
return np.fft.fft2(x, axes=(0, 1))
def get_features(self):
"""
:param im: input image
:return:
"""
if self.feature_type == 'raw':
#using only grayscale:
if len(self.im_crop.shape) == 3:
if self.sub_feature_type == 'gray':
img_gray = np.mean(self.im_crop, axis=2)
img_gray = img_gray - img_gray.mean()
features = np.multiply(img_gray, self.cos_window)
else:
img_colour = self.im_crop - self.im_crop.mean()
features = np.multiply(img_colour, self.cos_window[:, :, None])
elif self.feature_type == 'dsst':
img_colour = self.im_crop - self.im_crop.mean()
features = np.multiply(img_colour, self.cos_window[:, :, None])
elif self.feature_type == 'vgg' or self.feature_type == 'resnet50':
if self.feature_type == 'vgg':
from keras.applications.vgg19 import preprocess_input
elif self.feature_type == 'resnet50':
from keras.applications.resnet50 import preprocess_input
x = np.expand_dims(self.im_crop.copy(), axis=0)
x = preprocess_input(x)
features = self.extract_model.predict(x)
features = np.squeeze(features)
features = (features.transpose(1, 2, 0) - features.min()) / (features.max() - features.min())
features = np.multiply(features, self.cos_window[:, :, None])
elif self.feature_type == 'vgg_rnn' or self.feature_type=='cnn':
from keras.applications.vgg19 import preprocess_input
x = imresize(self.im_crop.copy(), self.resize_size)
x = x.transpose((2, 0, 1)).astype(np.float64)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = self.extract_model.predict(x)
features = np.squeeze(features)
features = (features.transpose(1, 2, 0) - features.min()) / (features.max() - features.min())
features = np.multiply(features, self.cos_window[:, :, None])
elif self.feature_type == "multi_cnn":
from keras.applications.vgg19 import preprocess_input
x = imresize(self.im_crop.copy(), self.resize_size)
x = x.transpose((2, 0, 1)).astype(np.float64)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features_list = self.extract_model_function(x)
for i, features in enumerate(features_list):
features = np.squeeze(features)
features = (features.transpose(1, 2, 0) - features.min()) / (features.max() - features.min())
features_list[i] = np.multiply(features, self.cos_window[i][:, :, None])
return features_list
else:
assert 'Non implemented!'
if not (self.sub_feature_type=="" or self.feature_correlation is None):
features = np.multiply(features, self.feature_correlation[None, None, :])
return features
def get_scale_sample(self, im, scaleFactors):
from pyhog import pyhog
#resized_im_array = np.zeros((len(self.scaleFactors), int(np.floor(self.first_target_sz[0]/4) * np.floor(self.first_target_sz[1]/4) * 31)))
resized_im_array = []
for i, s in enumerate(scaleFactors):
patch_sz = np.floor(self.first_target_sz * s)
im_patch = self.get_subwindow(im, self.pos, patch_sz) # extract image
im_patch_resized = imresize(im_patch, self.first_target_sz) #resize image to model size
features_hog = pyhog.features_pedro(im_patch_resized.astype(np.float64)/255.0, 4)
resized_im_array.append(np.multiply(features_hog.flatten(), self.scale_window[i]))
return np.asarray(resized_im_array)
def train_rnn(self, frame, im, init_rect, target_sz, img_rgb_next, next_rect, next_target_sz):
self.pos = [init_rect[1] + init_rect[3] / 2., init_rect[0] + init_rect[2] / 2.]
        # OBT ground-truth rects are (x, y, w, h); flip (w, h) to (h, w) for target_sz
self.target_sz = target_sz[::-1]
# desired padded input, proportional to input target size
self.patch_size = np.floor(self.target_sz * (1 + self.padding))
self.im_sz = im.shape[1:]
if frame==0:
self.im_crop = self.get_subwindow(im, self.pos, self.patch_size)
self.x = self.get_features()
self.xf = self.fft2(self.x)
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, self.xf, self.x)
self.alphaf = np.divide(self.yf, self.fft2(k) + self.lambda_value)
###################### Next frame #####################################
self.im_crop = self.get_subwindow(img_rgb_next, self.pos, self.patch_size)
z = self.get_features()
zf = self.fft2(z)
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, self.xf, self.x, zf, z)
kf = self.fft2(k)
self.response = np.real(np.fft.ifft2(np.multiply(self.alphaf, kf)))
##################################################################################
# we need to train the tracker again here, it's almost the replicate of train
##################################################################################
self.pos_next = [next_rect[1] + next_rect[3] / 2., next_rect[0] + next_rect[2] / 2.]
self.im_crop = self.get_subwindow(img_rgb_next, self.pos_next, self.patch_size)
x_new = self.get_features()
xf_new = self.fft2(x_new)
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, xf_new, x_new)
kf = self.fft2(k)
alphaf_new = np.divide(self.yf, kf + self.lambda_value)
self.x = (1 - self.adaptation_rate) * self.x + self.adaptation_rate * x_new
self.xf = (1 - self.adaptation_rate) * self.xf + self.adaptation_rate * xf_new
self.alphaf = (1 - self.adaptation_rate) * self.alphaf + self.adaptation_rate * alphaf_new
lstm_input = self.response.flatten()
lstm_input.resize(1, np.prod(self.response_size))
pos_move = np.array([(self.pos_next[0] - self.pos[0]), (self.pos_next[1] - self.pos[1])])
pos_move.resize(1, 2)
self.lstm_model.fit(lstm_input, pos_move, batch_size=1, verbose=1, nb_epoch=1, shuffle=False)
print('Predicting')
predicted_output = self.lstm_model.predict(lstm_input, batch_size=1)
print(pos_move)
print(predicted_output)
def train_cnn(self, frame, im, init_rect, img_rgb_next, next_rect, x_train, y_train, count):
self.pos = [init_rect[1] + init_rect[3] / 2., init_rect[0] + init_rect[2] / 2.]
        # OBT ground-truth rects are (x, y, w, h); flip (w, h) to (h, w) for target_sz
self.target_sz = np.asarray(init_rect[2:])
self.target_sz = self.target_sz[::-1]
self.next_target_sz = np.asarray(next_rect[2:])
self.next_target_sz = self.next_target_sz[::-1]
self.scale_change = np.divide(np.array(self.next_target_sz).astype(float), self.target_sz)
# desired padded input, proportional to input target size
self.patch_size = np.floor(self.target_sz * (1 + self.padding))
self.im_sz = im.shape[1:]
if frame == 0:
self.im_crop = self.get_subwindow(im, self.pos, self.patch_size)
self.x = self.get_features()
self.xf = self.fft2(self.x)
# if self.feature_type == 'multi_cnn':
# self.feature_correlation = []
# for i in range(len(self.x)):
# corr = np.multiply(self.x[i], self.y[i][:, :, None])
# corr = np.sum(np.sum(corr, axis=0), axis=0)
# # we compute the correlation of a filter within a layer to its features
# self.feature_correlation.append((corr - corr.min()) / (corr.max() - corr.min()))
# here self.xf is list
self.alphaf = []
for i in range(len(self.x)):
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, self.xf[i], self.x[i])
self.alphaf.append(np.divide(self.yf[i], self.fft2(k) + self.lambda_value))
###################### Next frame #####################################
self.im_crop = self.get_subwindow(img_rgb_next, self.pos, self.patch_size)
z = self.get_features()
zf = self.fft2(z)
self.response = []
for i in range(len(z)):
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, self.xf[i], self.x[i], zf[i], z[i])
kf = self.fft2(k)
self.response.append(np.real(np.fft.ifft2(np.multiply(self.alphaf[i], kf))))
##################################################################################
# we need to train the tracker again here, it's almost the replicate of train
##################################################################################
self.pos_next = [next_rect[1] + next_rect[3] / 2., next_rect[0] + next_rect[2] / 2.]
self.im_crop = self.get_subwindow(img_rgb_next, self.pos_next, self.patch_size)
x_new = self.get_features()
xf_new = self.fft2(x_new)
for i in range(len(x_new)):
k = self.dense_gauss_kernel(self.feature_bandwidth_sigma, xf_new[i], x_new[i])
kf = self.fft2(k)
alphaf_new = np.divide(self.yf[i], kf + self.lambda_value)
self.x[i] = (1 - self.adaptation_rate) * self.x[i] + self.adaptation_rate * x_new[i]
self.xf[i] = (1 - self.adaptation_rate) * self.xf[i] + self.adaptation_rate * xf_new[i]
self.alphaf[i] = (1 - self.adaptation_rate) * self.alphaf[i] + self.adaptation_rate * alphaf_new
response_all = np.zeros(shape=(5, self.resize_size[0], self.resize_size[1]))
for i in range(len(self.response)):
response_all[i, :, :] = imresize(self.response[i], size=self.resize_size)
x_train[count, :, :, :] = response_all
self.pos_next = [next_rect[1] + next_rect[3] / 2., next_rect[0] + next_rect[2] / 2.]
pos_move = np.array([(self.pos_next[0] - self.pos[0]) * 1.0 / self.target_sz[0],
(self.pos_next[1] - self.pos[1]) * 1.0 / self.target_sz[1]])
y_train[count, :] = np.concatenate([pos_move, self.scale_change])
count += 1
return x_train, y_train, count
# ('feature time:', 0.07054710388183594)
# ('fft2:', 0.22904396057128906)
# ('guassian kernel + fft2: ', 0.20537400245666504)
def grabcut(self, im, init_rect, seq_name):
"""
        :param im: image should be of 3 dimension: M*N*C
        :param init_rect: initial bounding box in OBT format [x, y, width, height]
        :param seq_name: sequence name, used as the filename of the saved mask
"""
import cv2
global img, img2, drawing, value, mask, rectangle, rect, rect_or_mask, ix, iy, rect_over
BLUE = [255, 0, 0] # rectangle color
RED = [0, 0, 255] # PR BG
GREEN = [0, 255, 0] # PR FG
BLACK = [0, 0, 0] # sure BG
WHITE = [255, 255, 255] # sure FG
DRAW_BG = {'color': BLACK, 'val': 0}
DRAW_FG = {'color': WHITE, 'val': 1}
DRAW_PR_FG = {'color': GREEN, 'val': 3}
DRAW_PR_BG = {'color': RED, 'val': 2}
# setting up flags
rect = (0, 0, 1, 1)
drawing = False # flag for drawing curves
rectangle = False # flag for drawing rect
rect_over = False # flag to check if rect drawn
rect_or_mask = 0 # flag for selecting rect or mask mode
value = DRAW_FG # drawing initialized to FG
thickness = 3 # brush thickness
def onmouse(event, x, y, flags, param):
global img, img2, drawing, value, mask, rectangle, rect, rect_or_mask, ix, iy, rect_over
# Draw Rectangle
# if event == cv2.EVENT_RBUTTONDOWN:
# rectangle = True
# ix, iy = x, y
#
# elif event == cv2.EVENT_MOUSEMOVE:
# if rectangle == True:
# img = img2.copy()
# cv2.rectangle(img, (ix, iy), (x, y), BLUE, 2)
# rect = (min(ix, x), min(iy, y), abs(ix - x), abs(iy - y))
# rect_or_mask = 0
#
# elif event == cv2.EVENT_RBUTTONUP:
# rectangle = False
# rect_over = True
# cv2.rectangle(img, (ix, iy), (x, y), BLUE, 2)
# rect = (min(ix, x), min(iy, y), abs(ix - x), abs(iy - y))
# rect_or_mask = 0
# print(" Now press the key 'n' a few times until no further change \n")
# draw touchup curves
if event == cv2.EVENT_LBUTTONDOWN:
rect_over = True
if rect_over == False:
print("first draw rectangle \n")
else:
drawing = True
cv2.circle(img, (x, y), thickness, value['color'], -1)
cv2.circle(mask, (x, y), thickness, value['val'], -1)
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True:
cv2.circle(img, (x, y), thickness, value['color'], -1)
cv2.circle(mask, (x, y), thickness, value['val'], -1)
elif event == cv2.EVENT_LBUTTONUP:
if drawing == True:
drawing = False
cv2.circle(img, (x, y), thickness, value['color'], -1)
cv2.circle(mask, (x, y), thickness, value['val'], -1)
self.pos = [init_rect[1] + init_rect[3] / 2., init_rect[0] + init_rect[2] / 2.]
self.res.append(init_rect)
        # OBT ground-truth rects are (x, y, w, h); flip (w, h) to (h, w) for target_sz
self.target_sz = np.asarray(init_rect[2:])
self.target_sz = self.target_sz[::-1]
self.patch_size = np.floor(self.target_sz * (1 + self.padding))
self.first_patch_sz = np.array(self.patch_size).astype(int)
self.im_sz = im.shape[:2]
########################################################
# let's try grabcut now!
########################################################
self.im_crop = self.get_subwindow(im, self.pos, self.patch_size)
sz = np.array(self.im_crop.shape[:2])
img = self.get_subwindow(im, self.pos, sz)
img2 = img.copy()
mask = np.zeros(img.shape[:2], dtype=np.uint8) # mask initialized to PR_BG
output = np.zeros(img.shape, np.uint8) # output image to be shown
#####################################################
coeff = 1.5
rect = np.array([sz[::-1] / 2 - self.target_sz[::-1] / 2 * coeff, sz[::-1] / 2 + self.target_sz[::-1] / 2 * coeff]).astype(np.int).flatten()
# input and output windows
cv2.namedWindow('output')
cv2.namedWindow('input')
cv2.setMouseCallback('input', onmouse)
cv2.moveWindow('input', img.shape[1] + 10, 90)
cv2.rectangle(img, (rect[0], rect[1]), (rect[2], rect[3]), BLUE, 2)
while True:
cv2.imshow('output', output)
cv2.imshow('input', img)
k = 0xFF & cv2.waitKey(1)
# key bindings
if k == 27: # esc to exit
break
elif k == ord('0'): # BG drawing
print(" mark background regions with left mouse button \n")
value = DRAW_BG
elif k == ord('1'): # FG drawing
print(" mark foreground regions with left mouse button \n")
value = DRAW_FG
elif k == ord('2'): # PR_BG drawing
value = DRAW_PR_BG
elif k == ord('3'): # PR_FG drawing
value = DRAW_PR_FG
elif k == ord('s'): # save image
bar = np.zeros((img.shape[0], 5, 3), np.uint8)
res = np.hstack((img2, bar, img, bar, output))
#cv2.imwrite('./figures/grabcut_output.png', res)
cv2.imwrite('./figures/masks/'+seq_name+'.png', mask2)
print(" Result saved as image \n")
cv2.destroyAllWindows()
break
elif k == ord('r'): # reset everything
print("resetting \n")
rect = (0, 0, 1, 1)
drawing = False
rectangle = False
rect_or_mask = 100
rect_over = False
value = DRAW_FG
img = img2.copy()
mask = np.zeros(img.shape[:2], dtype=np.uint8) # mask initialized to PR_BG
output = np.zeros(img.shape, np.uint8) # output image to be shown
elif k == ord('n'): # segment the image
print(""" For finer touchups, mark foreground and background after pressing keys 0-3
and again press 'n' \n""")
if (rect_or_mask == 0): # grabcut with rect
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
rect_tuple = (rect[0], rect[1], rect[2]-rect[0], rect[3]-rect[1])
cv2.grabCut(img2, mask, rect_tuple, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_RECT)
rect_or_mask = 1
elif rect_or_mask == 1: # grabcut with mask
bgdmodel = np.zeros((1, 65), np.float64)
fgdmodel = np.zeros((1, 65), np.float64)
cv2.grabCut(img2, mask, rect_tuple, bgdmodel, fgdmodel, 1, cv2.GC_INIT_WITH_MASK)
mask2 = np.where((mask == 1) + (mask == 3), 255, 0).astype('uint8')
output = cv2.bitwise_and(img2, img2, mask=mask2)
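
# The block below is an illustrative usage sketch, not part of the original tracker: it shows the
# typical train-once / detect-per-frame loop driven by this class. The sequence folder, frame
# naming and the initial rectangle are hypothetical placeholders; only KCFTracker, train() and
# detect() come from this file, and feature_type='raw' is chosen so no Keras model is required.
if __name__ == '__main__':
    import glob
    import cv2
    frame_paths = sorted(glob.glob('./sequence/img/*.jpg'))  # hypothetical image sequence
    init_rect = [100, 80, 60, 90]                            # hypothetical [x, y, w, h] init box

    tracker = KCFTracker(feature_type='raw')
    for frame_id, path in enumerate(frame_paths):
        img = cv2.imread(path)                        # H*W*3, BGR
        img = img[:, :, ::-1].transpose(2, 0, 1)      # -> 3*H*W as expected by train()/detect()
        if frame_id == 0:
            tracker.train(img, init_rect, 'sequence')
        else:
            pos = tracker.detect(img, frame_id)
            print(frame_id, pos, tracker.res[-1])     # centre position and [x, y, w, h] result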
| gpl-3.0 |
danielvdende/incubator-airflow | tests/contrib/hooks/test_bigquery_hook.py | 3 | 17582 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import warnings
from google.auth.exceptions import GoogleAuthError
import mock
from airflow.contrib.hooks import bigquery_hook as hook
from airflow.contrib.hooks.bigquery_hook import _cleanse_time_partitioning
bq_available = True
try:
hook.BigQueryHook().get_service()
except GoogleAuthError:
bq_available = False
class TestBigQueryDataframeResults(unittest.TestCase):
def setUp(self):
self.instance = hook.BigQueryHook()
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_output_is_dataframe_with_valid_query(self):
import pandas as pd
df = self.instance.get_pandas_df('select 1')
self.assertIsInstance(df, pd.DataFrame)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_invalid_query(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df('from `1`')
self.assertIn('Reason: ', str(context.exception), "")
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
    def test_succeeds_with_explicit_legacy_query(self):
df = self.instance.get_pandas_df('select 1', dialect='legacy')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
    def test_succeeds_with_explicit_std_query(self):
df = self.instance.get_pandas_df(
'select * except(b) from (select 1 a, 2 b)', dialect='standard')
self.assertEqual(df.iloc(0)[0][0], 1)
@unittest.skipIf(not bq_available, 'BQ is not available to run tests')
def test_throws_exception_with_incompatible_syntax(self):
with self.assertRaises(Exception) as context:
self.instance.get_pandas_df(
'select * except(b) from (select 1 a, 2 b)', dialect='legacy')
self.assertIn('Reason: ', str(context.exception), "")
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('dataset.table', None)
self.assertIn('INTERNAL: No default project is specified',
str(context.exception), "")
def test_split_dataset_table(self):
project, dataset, table = hook._split_tablename('dataset.table',
'project')
self.assertEqual("project", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative:dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_sql_split_project_dataset_table(self):
project, dataset, table = hook._split_tablename('alternative.dataset.table',
'project')
self.assertEqual("alternative", project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_colon_in_project(self):
project, dataset, table = hook._split_tablename('alt1:alt.dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_valid_double_column(self):
project, dataset, table = hook._split_tablename('alt1:alt:dataset.table',
'project')
self.assertEqual('alt1:alt', project)
self.assertEqual("dataset", dataset)
self.assertEqual("table", table)
def test_invalid_syntax_triple_colon(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt3:dataset.table',
'project')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_triple_dot(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertFalse('Format exception for' in str(context.exception))
def test_invalid_syntax_column_double_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt.dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_colon_project_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1:alt2:alt:dataset.table',
'project', 'var_x')
self.assertIn('Use either : or . to specify project',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
def test_invalid_syntax_triple_dot_var(self):
with self.assertRaises(Exception) as context:
hook._split_tablename('alt1.alt.dataset.table',
'project', 'var_x')
self.assertIn('Expect format of (<project.|<project:)<dataset>.<table>',
str(context.exception), "")
self.assertIn('Format exception for var_x:',
str(context.exception), "")
class TestBigQueryHookSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test", "test_schema.json", ["test_data.json"], source_format="json"
)
# since we passed 'json' in, and it's not valid, make sure it's present in the
# error string.
self.assertIn("JSON", str(context.exception))
class TestBigQueryExternalTableSourceFormat(unittest.TestCase):
def test_invalid_source_format(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").create_external_table(
external_project_dataset_table='test.test',
schema_fields='test_schema.json',
source_uris=['test_data.json'],
source_format='json'
)
        # since we passed 'json' in, and it's not valid, make sure it's present in the
# error string.
self.assertIn("JSON", str(context.exception))
# Helpers to test_cancel_queries that have mock_poll_job_complete returning false,
# unless mock_job_cancel was called with the same job_id
mock_canceled_jobs = []
def mock_poll_job_complete(job_id):
return job_id in mock_canceled_jobs
def mock_job_cancel(projectId, jobId):
mock_canceled_jobs.append(jobId)
return mock.Mock()
class TestBigQueryBaseCursor(unittest.TestCase):
def test_invalid_schema_update_options(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"]
)
self.assertIn("THIS IS NOT VALID", str(context.exception))
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_bql_deprecation_warning(self, mock_rwc):
with warnings.catch_warnings(record=True) as w:
hook.BigQueryBaseCursor("test", "test").run_query(
bql='select * from test_table'
)
self.assertIn(
'Deprecated parameter `bql`',
w[0].message.args[0])
def test_nobql_nosql_param_error(self):
with self.assertRaises(TypeError) as context:
hook.BigQueryBaseCursor("test", "test").run_query(
sql=None,
bql=None
)
self.assertIn(
'missing 1 required positional',
str(context.exception))
def test_invalid_schema_update_and_write_disposition(self):
with self.assertRaises(Exception) as context:
hook.BigQueryBaseCursor("test", "test").run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY'
)
self.assertIn("schema_update_options is only", str(context.exception))
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
def test_cancel_queries(self, mocked_time, mocked_logging):
project_id = 12345
running_job_id = 3
mock_jobs = mock.Mock()
mock_jobs.cancel = mock.Mock(side_effect=mock_job_cancel)
mock_service = mock.Mock()
mock_service.jobs = mock.Mock(return_value=mock_jobs)
bq_hook = hook.BigQueryBaseCursor(mock_service, project_id)
bq_hook.running_job_id = running_job_id
bq_hook.poll_job_complete = mock.Mock(side_effect=mock_poll_job_complete)
bq_hook.cancel_query()
mock_jobs.cancel.assert_called_with(projectId=project_id, jobId=running_job_id)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_sql_dialect_default(self, run_with_config):
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
cursor.run_query('query')
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], True)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_sql_dialect_override(self, run_with_config):
for bool_val in [True, False]:
cursor = hook.BigQueryBaseCursor(mock.Mock(), "project_id")
cursor.run_query('query', use_legacy_sql=bool_val)
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], bool_val)
class TestLabelsInRunJob(unittest.TestCase):
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_with_arg(self, mocked_rwc):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['labels'], {'label1': 'test1', 'label2': 'test2'}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
labels={'label1': 'test1', 'label2': 'test2'}
)
mocked_rwc.assert_called_once()
class TestTimePartitioningInRunJob(unittest.TestCase):
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_load_default(self, mocked_rwc, mocked_time, mocked_logging):
project_id = 12345
def run_with_config(config):
self.assertIsNone(config['load'].get('timePartitioning'))
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
mocked_rwc.assert_called_once()
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_load_with_arg(self, mocked_rwc, mocked_time, mocked_logging):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['load']['timePartitioning'],
{
'field': 'test_field',
'type': 'DAY',
'expirationMs': 1000
}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
mocked_rwc.assert_called_once()
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_default(self, mocked_rwc, mocked_time, mocked_logging):
project_id = 12345
def run_with_config(config):
self.assertIsNone(config['query'].get('timePartitioning'))
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(sql='select 1')
mocked_rwc.assert_called_once()
@mock.patch("airflow.contrib.hooks.bigquery_hook.LoggingMixin")
@mock.patch("airflow.contrib.hooks.bigquery_hook.time")
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_run_query_with_arg(self, mocked_rwc, mocked_time, mocked_logging):
project_id = 12345
def run_with_config(config):
self.assertEqual(
config['query']['timePartitioning'],
{
'field': 'test_field',
'type': 'DAY',
'expirationMs': 1000
}
)
mocked_rwc.side_effect = run_with_config
bq_hook = hook.BigQueryBaseCursor(mock.Mock(), project_id)
bq_hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
time_partitioning={'type': 'DAY',
'field': 'test_field', 'expirationMs': 1000}
)
mocked_rwc.assert_called_once()
def test_dollar_makes_partition(self):
tp_out = _cleanse_time_partitioning('test.teast$20170101', {})
expect = {
'type': 'DAY'
}
self.assertEqual(tp_out, expect)
def test_extra_time_partitioning_options(self):
tp_out = _cleanse_time_partitioning(
'test.teast',
{'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
expect = {
'type': 'DAY',
'field': 'test_field',
'expirationMs': 1000
}
self.assertEqual(tp_out, expect)
def test_cant_add_dollar_and_field_name(self):
with self.assertRaises(AssertionError):
_cleanse_time_partitioning(
'test.teast$20170101',
{'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
class TestBigQueryHookLegacySql(unittest.TestCase):
"""Ensure `use_legacy_sql` param in `BigQueryHook` propagates properly."""
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_hook_uses_legacy_sql_by_default(self, run_with_config):
with mock.patch.object(hook.BigQueryHook, 'get_service'):
bq_hook = hook.BigQueryHook()
bq_hook.get_first('query')
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], True)
@mock.patch.object(hook.BigQueryBaseCursor, 'run_with_configuration')
def test_legacy_sql_override_propagates_properly(self, run_with_config):
with mock.patch.object(hook.BigQueryHook, 'get_service'):
bq_hook = hook.BigQueryHook(use_legacy_sql=False)
bq_hook.get_first('query')
args, kwargs = run_with_config.call_args
self.assertIs(args[0]['query']['useLegacySql'], False)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
larsoner/mne-python | mne/externals/h5io/_h5io.py | 6 | 27210 | # -*- coding: utf-8 -*-
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
from datetime import datetime, timezone, timedelta
import json
import sys
import tempfile
from shutil import rmtree
from os import path as op
import numpy as np
try:
from scipy import sparse
except ImportError:
sparse = None
# Adapted from six
PY3 = sys.version_info[0] == 3
text_type = str if PY3 else unicode # noqa
string_types = str if PY3 else basestring # noqa
special_chars = {'{FWDSLASH}': '/'}
tab_str = '----'
##############################################################################
# WRITING
def _check_h5py():
"""Helper to check if h5py is installed"""
try:
import h5py
except ImportError:
raise ImportError('the h5py module is required to use HDF5 I/O')
return h5py
def _create_titled_group(root, key, title):
"""Helper to create a titled group in h5py"""
out = root.create_group(key)
out.attrs['TITLE'] = title
return out
def _create_titled_dataset(root, key, title, data, comp_kw=None):
"""Helper to create a titled dataset in h5py"""
comp_kw = {} if comp_kw is None else comp_kw
out = root.create_dataset(key, data=data, **comp_kw)
out.attrs['TITLE'] = title
return out
def _create_pandas_dataset(fname, root, key, title, data):
h5py = _check_h5py()
rootpath = '/'.join([root, key])
data.to_hdf(fname, rootpath)
with h5py.File(fname, mode='a') as fid:
fid[rootpath].attrs['TITLE'] = 'pd_dataframe'
def write_hdf5(fname, data, overwrite=False, compression=4,
title='h5io', slash='error', use_json=False):
"""Write python object to HDF5 format using h5py.
Parameters
----------
fname : str
Filename to use.
data : object
Object to write. Can be of any of these types:
{ndarray, dict, list, tuple, int, float, str, Datetime}
Note that dict objects must only have ``str`` keys. It is recommended
to use ndarrays where possible, as it is handled most efficiently.
overwrite : True | False | 'update'
If True, overwrite file (if it exists). If 'update', appends the title
to the file (or replace value if title exists).
compression : int
Compression level to use (0-9) to compress data using gzip.
title : str
The top-level directory name to use. Typically it is useful to make
this your package name, e.g. ``'mnepython'``.
slash : 'error' | 'replace'
Whether to replace forward-slashes ('/') in any key found nested within
keys in data. This does not apply to the top level name (title).
If 'error', '/' is not allowed in any lower-level keys.
use_json : bool
To accelerate the read and write performance of small dictionaries and
lists they can be combined to JSON objects and stored as strings.
"""
h5py = _check_h5py()
mode = 'w'
if op.isfile(fname):
if isinstance(overwrite, string_types):
if overwrite != 'update':
raise ValueError('overwrite must be "update" or a bool')
mode = 'a'
elif not overwrite:
raise IOError('file "%s" exists, use overwrite=True to overwrite'
% fname)
if not isinstance(title, string_types):
raise ValueError('title must be a string')
comp_kw = dict()
if compression > 0:
comp_kw = dict(compression='gzip', compression_opts=compression)
with h5py.File(fname, mode=mode) as fid:
if title in fid:
del fid[title]
cleanup_data = []
_triage_write(title, data, fid, comp_kw, str(type(data)),
cleanup_data, slash=slash, title=title,
use_json=use_json)
# Will not be empty if any extra data to be written
for data in cleanup_data:
# In case different extra I/O needs different inputs
title = list(data.keys())[0]
if title in ['pd_dataframe', 'pd_series']:
rootname, key, value = data[title]
_create_pandas_dataset(fname, rootname, key, title, value)
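# A minimal usage sketch for write_hdf5 (illustrative only; the file name and
# payload below are assumptions, not part of the h5io API):
def _write_hdf5_example(fname='example.h5'):
    payload = dict(samples=np.arange(10), label=u'demo',
                   params=dict(rate=250.0, channels=[1, 2, 3]))
    # dict keys must be strings; ndarrays are stored most efficiently
    write_hdf5(fname, payload, overwrite=True, title='h5io')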
def _triage_write(key, value, root, comp_kw, where,
cleanup_data, slash='error', title=None,
use_json=False):
if key != title and '/' in key:
if slash == 'error':
raise ValueError('Found a key with "/", '
'this is not allowed if slash == error')
elif slash == 'replace':
# Auto-replace keys with proper values
for key_spec, val_spec in special_chars.items():
key = key.replace(val_spec, key_spec)
else:
raise ValueError("slash must be one of ['error', 'replace'")
if use_json and isinstance(value, (list, dict)) and \
_json_compatible(value, slash=slash):
value = np.frombuffer(json.dumps(value).encode('utf-8'), np.uint8)
_create_titled_dataset(root, key, 'json', value, comp_kw)
elif isinstance(value, dict):
sub_root = _create_titled_group(root, key, 'dict')
for key, sub_value in value.items():
if not isinstance(key, string_types):
raise TypeError('All dict keys must be strings')
_triage_write(
'key_{0}'.format(key), sub_value, sub_root, comp_kw,
where + '["%s"]' % key, cleanup_data=cleanup_data, slash=slash)
elif isinstance(value, (list, tuple)):
title = 'list' if isinstance(value, list) else 'tuple'
sub_root = _create_titled_group(root, key, title)
for vi, sub_value in enumerate(value):
_triage_write(
'idx_{0}'.format(vi), sub_value, sub_root, comp_kw,
where + '[%s]' % vi, cleanup_data=cleanup_data, slash=slash)
elif isinstance(value, type(None)):
_create_titled_dataset(root, key, 'None', [False])
elif isinstance(value, (int, float)):
if isinstance(value, int):
title = 'int'
else: # isinstance(value, float):
title = 'float'
_create_titled_dataset(root, key, title, np.atleast_1d(value))
elif isinstance(value, datetime):
title = 'datetime'
value = np.frombuffer(value.isoformat().encode('utf-8'), np.uint8)
_create_titled_dataset(root, key, title, value)
elif isinstance(value, (np.integer, np.floating, np.bool_)):
title = 'np_{0}'.format(value.__class__.__name__)
_create_titled_dataset(root, key, title, np.atleast_1d(value))
elif isinstance(value, string_types):
if isinstance(value, text_type): # unicode
value = np.frombuffer(value.encode('utf-8'), np.uint8)
title = 'unicode'
else:
value = np.frombuffer(value.encode('ASCII'), np.uint8)
title = 'ascii'
_create_titled_dataset(root, key, title, value, comp_kw)
elif isinstance(value, np.ndarray):
if not (value.dtype == np.dtype('object') and
len(set([sub.dtype for sub in value])) == 1):
_create_titled_dataset(root, key, 'ndarray', value)
else:
ma_index, ma_data = multiarray_dump(value)
sub_root = _create_titled_group(root, key, 'multiarray')
_create_titled_dataset(sub_root, 'index', 'ndarray', ma_index)
_create_titled_dataset(sub_root, 'data', 'ndarray', ma_data)
elif sparse is not None and isinstance(value, sparse.csc_matrix):
sub_root = _create_titled_group(root, key, 'csc_matrix')
_triage_write('data', value.data, sub_root, comp_kw,
where + '.csc_matrix_data', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indices', value.indices, sub_root, comp_kw,
where + '.csc_matrix_indices', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indptr', value.indptr, sub_root, comp_kw,
where + '.csc_matrix_indptr', cleanup_data=cleanup_data,
slash=slash)
elif sparse is not None and isinstance(value, sparse.csr_matrix):
sub_root = _create_titled_group(root, key, 'csr_matrix')
_triage_write('data', value.data, sub_root, comp_kw,
where + '.csr_matrix_data', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indices', value.indices, sub_root, comp_kw,
where + '.csr_matrix_indices', cleanup_data=cleanup_data,
slash=slash)
_triage_write('indptr', value.indptr, sub_root, comp_kw,
where + '.csr_matrix_indptr', cleanup_data=cleanup_data,
slash=slash)
_triage_write('shape', value.shape, sub_root, comp_kw,
where + '.csr_matrix_shape', cleanup_data=cleanup_data,
slash=slash)
else:
try:
from pandas import DataFrame, Series
except ImportError:
pass
else:
if isinstance(value, (DataFrame, Series)):
if isinstance(value, DataFrame):
title = 'pd_dataframe'
else:
title = 'pd_series'
rootname = root.name
cleanup_data.append({title: (rootname, key, value)})
return
err_str = 'unsupported type %s (in %s)' % (type(value), where)
raise TypeError(err_str)
##############################################################################
# READING
def read_hdf5(fname, title='h5io', slash='ignore'):
"""Read python object from HDF5 format using h5py
Parameters
----------
fname : str
File to load.
title : str
The top-level directory name to use. Typically it is useful to make
this your package name, e.g. ``'mnepython'``.
slash : 'ignore' | 'replace'
Whether to replace the string {FWDSLASH} with the value /. This does
not apply to the top level name (title). If 'ignore', nothing will be
replaced.
Returns
-------
data : object
The loaded data. Can be of any type supported by ``write_hdf5``.
"""
h5py = _check_h5py()
if not op.isfile(fname):
raise IOError('file "%s" not found' % fname)
if not isinstance(title, string_types):
raise ValueError('title must be a string')
with h5py.File(fname, mode='r') as fid:
if title not in fid:
raise ValueError('no "%s" data found' % title)
if isinstance(fid[title], h5py.Group):
if 'TITLE' not in fid[title].attrs:
raise ValueError('no "%s" data found' % title)
data = _triage_read(fid[title], slash=slash)
return data
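# The corresponding read-back sketch (illustrative only; assumes a file that was
# previously written with write_hdf5 under the default 'h5io' title):
def _read_hdf5_example(fname='example.h5'):
    # returns the original python object (dicts, lists, ndarrays, ...)
    return read_hdf5(fname, title='h5io')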
def _triage_read(node, slash='ignore'):
if slash not in ['ignore', 'replace']:
raise ValueError("slash must be one of 'replace', 'ignore'")
h5py = _check_h5py()
type_str = node.attrs['TITLE']
if isinstance(type_str, bytes):
type_str = type_str.decode()
if isinstance(node, h5py.Group):
if type_str == 'dict':
data = dict()
for key, subnode in node.items():
if slash == 'replace':
for key_spec, val_spec in special_chars.items():
key = key.replace(key_spec, val_spec)
data[key[4:]] = _triage_read(subnode, slash=slash)
elif type_str in ['list', 'tuple']:
data = list()
ii = 0
while True:
subnode = node.get('idx_{0}'.format(ii), None)
if subnode is None:
break
data.append(_triage_read(subnode, slash=slash))
ii += 1
assert len(data) == ii
data = tuple(data) if type_str == 'tuple' else data
return data
elif type_str == 'csc_matrix':
if sparse is None:
raise RuntimeError('scipy must be installed to read this data')
data = sparse.csc_matrix((_triage_read(node['data'], slash=slash),
_triage_read(node['indices'],
slash=slash),
_triage_read(node['indptr'],
slash=slash)))
elif type_str == 'csr_matrix':
if sparse is None:
raise RuntimeError('scipy must be installed to read this data')
data = sparse.csr_matrix((_triage_read(node['data'], slash=slash),
_triage_read(node['indices'],
slash=slash),
_triage_read(node['indptr'],
slash=slash)),
shape=_triage_read(node['shape']))
elif type_str in ['pd_dataframe', 'pd_series']:
from pandas import read_hdf, HDFStore
rootname = node.name
filename = node.file.filename
with HDFStore(filename, 'r') as tmpf:
data = read_hdf(tmpf, rootname)
elif type_str == 'multiarray':
ma_index = _triage_read(node.get('index', None), slash=slash)
ma_data = _triage_read(node.get('data', None), slash=slash)
data = multiarray_load(ma_index, ma_data)
else:
raise NotImplementedError('Unknown group type: {0}'
''.format(type_str))
elif type_str == 'ndarray':
data = np.array(node)
elif type_str in ('int', 'float'):
cast = int if type_str == 'int' else float
data = cast(np.array(node)[0])
elif type_str == 'datetime':
data = text_type(np.array(node).tobytes().decode('utf-8'))
data = fromisoformat(data)
elif type_str.startswith('np_'):
np_type = type_str.split('_')[1]
cast = getattr(np, np_type)
data = cast(np.array(node)[0])
elif type_str in ('unicode', 'ascii', 'str'): # 'str' for backward compat
decoder = 'utf-8' if type_str == 'unicode' else 'ASCII'
cast = text_type if type_str == 'unicode' else str
data = cast(np.array(node).tobytes().decode(decoder))
elif type_str == 'json':
node_unicode = str(np.array(node).tobytes().decode('utf-8'))
data = json.loads(node_unicode)
elif type_str == 'None':
data = None
else:
raise TypeError('Unknown node type: {0}'.format(type_str))
return data
# ############################################################################
# UTILITIES
def _sort_keys(x):
"""Sort and return keys of dict"""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float.
b : object
        Must be same type as ``a``.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
try:
from pandas import DataFrame, Series
except ImportError:
DataFrame = Series = type(None)
out = ''
if type(a) != type(b):
out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
elif isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' x1 missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' x2 missing key %s\n' % key
else:
out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for xx1, xx2 in zip(a, b):
out += object_diff(xx1, xx2, pre='')
elif isinstance(a, (string_types, int, float, bytes)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
pass # b must be None due to our type checking
elif isinstance(a, np.ndarray):
if not np.array_equal(a, b):
out += pre + ' array mismatch\n'
elif sparse is not None and sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
out += pre + (' sparse matrix a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
out += pre + (' sparse matrix a and b differ on %s '
'elements' % c.nnz)
elif isinstance(a, (DataFrame, Series)):
if b.shape != a.shape:
out += pre + (' pandas values a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a.values - b.values
nzeros = np.sum(c != 0)
if nzeros > 0:
out += pre + (' pandas values a and b differ on %s '
'elements' % nzeros)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
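# A small sketch of object_diff on two nested structures (illustrative only):
def _object_diff_example():
    a = dict(x=np.arange(3), y=[1, 2, 3])
    b = dict(x=np.arange(3), y=[1, 2, 4])
    # returns '' when the objects match, otherwise one line per mismatch
    return object_diff(a, b)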
class _TempDir(str):
"""Class for creating and auto-destroying temp dir
This is designed to be used with testing modules. Instances should be
defined inside test functions. Instances defined at module level can not
guarantee proper destruction of the temporary directory.
When used at module level, the current use of the __del__() method for
cleanup can fail because the rmtree function may be cleaned up before this
object (an alternative could be using the atexit module instead).
"""
def __new__(self):
new = str.__new__(self, tempfile.mkdtemp())
return new
def __init__(self):
self._path = self.__str__()
def __del__(self):
rmtree(self._path, ignore_errors=True)
def _list_file_contents(h5file):
if 'h5io' not in h5file.keys():
raise ValueError('h5file must contain h5io data')
# Set up useful variables for later
h5file = h5file['h5io']
root_title = h5file.attrs['TITLE']
n_space = np.max([(len(key), len(val.attrs['TITLE']))
for key, val in h5file.items()]) + 2
# Create print strings
strs = ['Root type: %s | Items: %s\n' % (root_title, len(h5file))]
for key, data in h5file.items():
type_str = data.attrs['TITLE']
str_format = '%%-%ss' % n_space
if type_str == 'ndarray':
desc = 'Shape: %s'
desc_val = data.shape
elif type_str in ['pd_dataframe', 'pd_series']:
desc = 'Shape: %s'
desc_val = data['values'].shape
elif type_str in ('unicode', 'ascii', 'str'):
desc = 'Text: %s'
decoder = 'utf-8' if type_str == 'unicode' else 'ASCII'
cast = text_type if type_str == 'unicode' else str
data = cast(np.array(data).tobytes().decode(decoder))
desc_val = data[:10] + '...' if len(data) > 10 else data
else:
desc = 'Items: %s'
desc_val = len(data)
this_str = ('%%s Key: %s | Type: %s | ' + desc) % (
str_format, str_format, str_format)
this_str = this_str % (tab_str, key, type_str, desc_val)
strs.append(this_str)
out_str = '\n'.join(strs)
print(out_str)
def list_file_contents(h5file):
"""List the contents of an h5io file.
This will list the root and one-level-deep contents of the file.
Parameters
----------
h5file : str
The path to an h5io hdf5 file.
"""
h5py = _check_h5py()
err = 'h5file must be an h5py File object, not {0}'
if isinstance(h5file, str):
with h5py.File(h5file, 'r') as f:
_list_file_contents(f)
else:
if not isinstance(h5file, h5py.File):
raise TypeError(err.format(type(h5file)))
_list_file_contents(h5file)
def _json_compatible(obj, slash='error'):
if isinstance(obj, (string_types, int, float, bool, type(None))):
return True
elif isinstance(obj, list):
return all([_json_compatible(item) for item in obj])
elif isinstance(obj, dict):
_check_keys_in_dict(obj, slash=slash)
return all([_json_compatible(item) for item in obj.values()])
else:
return False
def _check_keys_in_dict(obj, slash='error'):
repl = list()
for key in obj.keys():
if '/' in key:
key_prev = key
if slash == 'error':
raise ValueError('Found a key with "/", '
'this is not allowed if slash == error')
elif slash == 'replace':
# Auto-replace keys with proper values
for key_spec, val_spec in special_chars.items():
key = key.replace(val_spec, key_spec)
repl.append((key, key_prev))
else:
raise ValueError("slash must be one of ['error', 'replace'")
for key, key_prev in repl:
obj[key] = obj.pop(key_prev)
##############################################################################
# Arrays with mixed dimensions
def _validate_object_array(array):
if not (array.dtype == np.dtype('object') and
len(set([sub.dtype for sub in array])) == 1):
raise TypeError('unsupported array type')
def _shape_list(array):
return [np.shape(sub) for sub in array]
def _validate_sub_shapes(shape_lst):
if not all([shape_lst[0][1:] == t[1:] for t in shape_lst]):
raise ValueError('shape does not match!')
def _array_index(shape_lst):
return [t[0] for t in shape_lst]
def _index_sum(index_lst):
index_sum_lst = []
for step in index_lst:
if index_sum_lst != []:
index_sum_lst.append(index_sum_lst[-1] + step)
else:
index_sum_lst.append(step)
return index_sum_lst
def _merge_array(array):
merged_lst = []
for sub in array:
merged_lst += sub.tolist()
return np.array(merged_lst)
def multiarray_dump(array):
_validate_object_array(array)
shape_lst = _shape_list(array)
_validate_sub_shapes(shape_lst=shape_lst)
index_sum = _index_sum(index_lst=_array_index(shape_lst=shape_lst))
return index_sum, _merge_array(array=array)
def multiarray_load(index, array_merged):
array_restore = []
i_prev = 0
for i in index[:-1]:
array_restore.append(array_merged[i_prev:i])
i_prev = i
array_restore.append(array_merged[i_prev:])
return np.array(array_restore)
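# A round-trip sketch for the mixed-dimension helpers above: two blocks that
# share their trailing dimensions but differ in length are merged and restored
# (illustrative only):
def _multiarray_example():
    ragged = np.array([np.zeros((2, 3)), np.ones((4, 3))], dtype=object)
    index, merged = multiarray_dump(ragged)   # index == [2, 6], merged is (6, 3)
    return multiarray_load(index, merged)     # back to the (2, 3) and (4, 3) blocks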
###############################################################################
# BACKPORTS
try:
fromisoformat = datetime.fromisoformat
except AttributeError: # Python < 3.7
# Code adapted from CPython
# https://github.com/python/cpython/blob/master/Lib/datetime.py
def _parse_hh_mm_ss_ff(tstr):
# Parses things of the form HH[:MM[:SS[.fff[fff]]]]
len_str = len(tstr)
time_comps = [0, 0, 0, 0]
pos = 0
for comp in range(0, 3):
if (len_str - pos) < 2:
raise ValueError('Incomplete time component')
time_comps[comp] = int(tstr[pos:pos + 2])
pos += 2
next_char = tstr[pos:pos + 1]
if not next_char or comp >= 2:
break
if next_char != ':':
raise ValueError('Invalid time separator: %c' % next_char)
pos += 1
if pos < len_str:
if tstr[pos] != '.':
raise ValueError('Invalid microsecond component')
else:
pos += 1
len_remainder = len_str - pos
if len_remainder not in (3, 6):
raise ValueError('Invalid microsecond component')
time_comps[3] = int(tstr[pos:])
if len_remainder == 3:
time_comps[3] *= 1000
return time_comps
def fromisoformat(date_string):
"""Construct a datetime from the output of datetime.isoformat()."""
if not isinstance(date_string, str):
raise TypeError('fromisoformat: argument must be str')
# Split this at the separator
dstr = date_string[0:10]
tstr = date_string[11:]
try:
date_components = _parse_isoformat_date(dstr)
except ValueError:
raise ValueError(
'Invalid isoformat string: {!r}'.format(date_string))
if tstr:
try:
time_components = _parse_isoformat_time(tstr)
except ValueError:
raise ValueError(
'Invalid isoformat string: {!r}'.format(date_string))
else:
time_components = [0, 0, 0, 0, None]
return datetime(*(date_components + time_components))
def _parse_isoformat_date(dtstr):
# It is assumed that this function will only be called with a
# string of length exactly 10, and (though this is not used) ASCII-only
year = int(dtstr[0:4])
if dtstr[4] != '-':
raise ValueError('Invalid date separator: %s' % dtstr[4])
month = int(dtstr[5:7])
if dtstr[7] != '-':
raise ValueError('Invalid date separator')
day = int(dtstr[8:10])
return [year, month, day]
def _parse_isoformat_time(tstr):
# Format supported is HH[:MM[:SS[.fff[fff]]]][+HH:MM[:SS[.ffffff]]]
len_str = len(tstr)
if len_str < 2:
raise ValueError('Isoformat time too short')
# This is equivalent to re.search('[+-]', tstr), but faster
tz_pos = (tstr.find('-') + 1 or tstr.find('+') + 1)
timestr = tstr[:tz_pos - 1] if tz_pos > 0 else tstr
time_comps = _parse_hh_mm_ss_ff(timestr)
tzi = None
if tz_pos > 0:
tzstr = tstr[tz_pos:]
# Valid time zone strings are:
# HH:MM len: 5
# HH:MM:SS len: 8
# HH:MM:SS.ffffff len: 15
if len(tzstr) not in (5, 8, 15):
raise ValueError('Malformed time zone string')
tz_comps = _parse_hh_mm_ss_ff(tzstr)
if all(x == 0 for x in tz_comps):
tzi = timezone.utc
else:
tzsign = -1 if tstr[tz_pos - 1] == '-' else 1
td = timedelta(hours=tz_comps[0], minutes=tz_comps[1],
seconds=tz_comps[2], microseconds=tz_comps[3])
tzi = timezone(tzsign * td)
time_comps.append(tzi)
return time_comps
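# A tiny round-trip sketch for the isoformat parsing above (illustrative only):
def _fromisoformat_example():
    stamp = datetime(2020, 5, 17, 12, 30, 45, 123456, tzinfo=timezone.utc)
    # '2020-05-17T12:30:45.123456+00:00' parses back to an equal datetime
    return fromisoformat(stamp.isoformat()) == stamp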
| bsd-3-clause |
agomariz/scikit-neuralnetwork | sknn/tests/test_pipeline.py | 8 | 1268 | import unittest
from nose.tools import (assert_equal, assert_true)
import io
import pickle
import numpy
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sknn.mlp import Regressor as MLPR
from sknn.mlp import Layer as L
class TestPipeline(unittest.TestCase):
def _run(self, pipeline):
a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
pipeline.fit(a_in, a_out)
pipeline.predict(a_in)
def test_NeuralNetworkOnly(self):
pipeline = Pipeline([
('neural network', MLPR(layers=[L("Linear")], n_iter=1))
])
self._run(pipeline)
def test_ScalerThenNeuralNetwork(self):
pipeline = Pipeline([
('min/max scaler', MinMaxScaler()),
('neural network', MLPR(layers=[L("Linear")], n_iter=1))
])
self._run(pipeline)
class TestSerializedPipeline(TestPipeline):
def _run(self, pipeline):
a_in, a_out = numpy.zeros((8,16)), numpy.zeros((8,4))
pipeline.fit(a_in, a_out)
a_test = pipeline.predict(a_in)
buf = io.BytesIO()
pickle.dump(pipeline, buf)
buf.seek(0)
p = pickle.load(buf)
assert_true((a_test == p.predict(a_in)).all())
| bsd-3-clause |
ChileanVirtualObservatory/StructureDetection | detection.py | 1 | 7367 | #This file is part of ChiVO, the Chilean Virtual Observatory
#A project sponsored by FONDEF (D11I1060)
#Copyright (C) 2015 Universidad Tecnica Federico Santa Maria Mauricio Solar
# Marcelo Mendoza
# Universidad de Chile Diego Mardones
# Pontificia Universidad Catolica Karim Pichara
# Universidad de Concepcion Ricardo Contreras
# Universidad de Santiago Victor Parada
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
import os
import numpy as np
import pyfits
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
#import pywt
import cv
import cv2
from pylab import*
def cdir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def dirs():
d = ["clusters","detections"]
for di in d:
cdir (di)
def access(filepath):
try:
f = open(filepath, "r")
except IOError as e:
return False
return True
######################################################################################
# wavelet(numpy array image, numpy array kernel, int J)-> list of numpy arrays with levels
# computes the J levels of stationnary wavelet transform using "algorithme a trous"
# Starck & Murtagh: Handbook of Astronomical Data Analysis, 2006.
# Appendix A: A Trous Wavelet Transform
######################################################################################
def wavelet(image,kernel, J):
levels=[]
Ck=np.copy(image)
for k in range(J):
#print kernel
kernel2D=np.zeros((len(kernel),len(kernel)))
kernel1D=np.array(kernel,dtype=np.float64)
for i in range(len(kernel2D)):
for j in range(len(kernel2D[0])):
kernel2D[i][j]=kernel1D[i]*kernel1D[j]
Ck1=np.zeros(image.shape,image.dtype)
#transform to cvMat
kernel2D=cv.fromarray(kernel2D)
Ck = cv.fromarray(Ck)
Ck1 = cv.fromarray(Ck1)
#covolve
cv.Filter2D(Ck,Ck1,kernel2D)
#back to numpy array
Ck=np.asarray(Ck)
Ck1=np.asarray(Ck1)
#append Level
levels.append(Ck-Ck1)
#update the kernel and Ck for next step:
auxkernel=[]
for i in range(len(kernel)-1):
auxkernel.append(kernel[i])
auxkernel.append(0.0)
auxkernel.append(kernel[-1])
kernel=auxkernel
Ck=np.copy(Ck1)
levels.append(Ck)
return levels
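# A short check of the reconstruction property of the transform above: the J
# detail planes plus the final smoothed plane sum back to the input image
# (illustrative only; the synthetic image is an assumed example).
def _wavelet_reconstruction_check():
    image = np.random.rand(64, 64).astype(np.float64)
    kernel1D = [1./16., 1./4., 3./8., 1./4., 1./16.]   # B3-spline, as used below
    levels = wavelet(image, kernel1D, 4)               # 4 detail planes + residual
    return np.allclose(np.sum(levels, axis=0), image)  # True by construction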
#############################################################################
# function kSigmaClipping(numpy array w1, int k)-> double sigma
# calculates standard deviation for noise using k-sigma-clipping algortihm
# Starck & Murtagh: Handbook of Astronomical Data Analysis, 2006.
# 2.3.3 Automatic Estimation of Gaussian Noise
#############################################################################
def kSigmaClipping(wi,k):
sigma=np.std(wi)
d=np.copy(wi)
for i in range(3):
d=d*(d<k*sigma)
d=d*( d>-k*sigma)
sigma=np.std(d)
return sigma
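# A quick sketch of the k-sigma clipping estimator above on synthetic data:
# Gaussian noise plus a few strong outliers; the clipped estimate stays close
# to the true sigma while the plain np.std is inflated (illustrative only).
def _ksigma_clipping_check():
    noise = np.random.normal(0.0, 1.0, (128, 128))
    noise[0, :10] = 50.0                          # a handful of bright outliers
    return np.std(noise), kSigmaClipping(noise, 3)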
########################################
dirs()
if len(sys.argv) > 1:
for grp in sys.argv[1:]:
if access('dataset/%s' %grp):
print "Calculando "+grp
sigma_e_j=np.array([0.889, 0.200, 0.086, 0.041, 0.020, 0.010, 0.005],dtype=np.float64)
n_levels=8
k=3
#Kernel B3-spline
kernel1D = np.array([1./16.,1./4.,3./8.,1./4.,1./16.],dtype=np.float64)
            #0: open the input file and get the image data array:
hdu=pyfits.open('dataset/%s' %grp)
data= asarray(hdu[0].data,dtype=np.float64)
print hdu[0].header["CTYPE1"]
hdu.close()
            #1: apply the wavelet transform
print "Calculando transformada de wavelets..."
levels=wavelet(data,kernel1D,n_levels)
            #2,3: for each level: compute the threshold and apply it to keep only the significant pixels per level
print "Calculando threshold por nivel..."
levels_s=[]
for level in levels[:-1]:
sigma_j= kSigmaClipping(level,k)
significants=level*(level>=k*sigma_j)
#significants+=level*(level<=-k*sigma_j)
levels_s.append(significants)
            #4: group levels according to structures:
print "Agrupando estructuras..."
estrellas=levels_s[1]+levels_s[2]
estrellas=np.asarray(255.*(estrellas!=0),dtype=np.uint8)
nucleos=levels_s[3]+levels_s[4]
nucleos=np.asarray(255.*(nucleos!=0),dtype=np.uint8)
nubes=levels_s[5]+levels_s[6]+levels_s[7]
nubes=np.asarray(255.*(nubes!=0),dtype=np.uint8)
            #5: show contours
estrella_contours,eh=cv2.findContours(estrellas, cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
estrella_contours=np.asarray(estrella_contours)
nucleo_contours,nh=cv2.findContours(nucleos, cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
nucleo_contours=np.asarray(nucleo_contours)
nube_contours,nnh=cv2.findContours(nubes, cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
nube_contours=np.asarray(nube_contours)
            #scale the range (only so that the result can be visualized)
s= kSigmaClipping(data,k)
data=data*(data>=k*s)
data-=k*s
            #show structures
print "Guardando deteccion de objetos en carpeta detections/ ..."
image1=np.asarray(cv2.cvtColor(np.asarray(data,dtype=np.float32), cv.CV_GRAY2RGB))
image=np.asarray(cv2.cvtColor(np.asarray(data,dtype=np.float32), cv.CV_GRAY2RGB))
rango=500000./image.max()
image*=rango
image1*=rango
red=(0,0,255)
green=(0,255,0)
blue=(255,0,0)
cv2.drawContours(image, estrella_contours, -1, red)
cv2.drawContours(image, nucleo_contours, -1, green,4)
cv2.drawContours(image, nube_contours, -1, blue,4)
cv2.imwrite("detections/%s-detec.png"%grp[:-5], image)
cv2.imwrite("detections/%s-Orig.png" %grp[:-5], image1)
            #clustering by shape:
print "Formando clusters para clasificar nucleos..."
hu_moments=[]
K=3
i=0
eliminar=[]
for contour in nucleo_contours:
hu=cv2.HuMoments(cv2.moments(contour))
if hu[0]==0:
eliminar.append(i)
else:
                    hu = sign(hu) * log(abs(hu))  # log-scale the Hu moments before clustering
hu_moments.append(hu)
i+=1
for index in reversed(eliminar):
nucleo_contours=np.delete(nucleo_contours,index)
retval,labels,centers=cv2.kmeans(np.asarray(hu_moments,dtype=np.float32),K,(cv2.TERM_CRITERIA_EPS|cv2.TERM_CRITERIA_MAX_ITER,1,100),1,cv2.KMEANS_RANDOM_CENTERS )
            #plot the result
print "Guardando nucleos clasificados en carpeta cluster/ ..."
im_class=[]
for i in range(K):
im_class.append(np.zeros(image.shape,image.dtype))
for i in range(len(labels)):
cv2.drawContours(im_class[labels[i][0]], nucleo_contours[i], -1,green,4)
i=0
for im in im_class:
cv2.imwrite("clusters/%s-class-%d.png"%(grp[:-5],i), im)
i+=1
print "Terminado "+grp
else:
print "El archivo {0} no existe".format(grp)
else:
print "Ingrese algun parametro!"
print "Terminado."
| gpl-3.0 |
miloharper/neural-network-animation | matplotlib/pyplot.py | 10 | 120496 | # Note: The first part of this file can be modified in place, but the latter
# part is autogenerated by the boilerplate.py script.
"""
Provides a MATLAB-like plotting framework.
:mod:`~matplotlib.pylab` combines pyplot with numpy into a single namespace.
This is convenient for interactive work, but for programming it
is recommended that the namespaces be kept separate, e.g.::
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 5, 0.1);
y = np.sin(x)
plt.plot(x, y)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import sys
import warnings
import matplotlib
import matplotlib.colorbar
from matplotlib import style
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.cbook import _string_to_bool
from matplotlib import docstring
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.figure import Figure, figaspect
from matplotlib.gridspec import GridSpec
from matplotlib.image import imread as _imread
from matplotlib.image import imsave as _imsave
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib import rc_context
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes, Subplot
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec, detrend_none, window_hanning
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap, register_cmap
import numpy as np
# We may not need the following imports here:
from matplotlib.colors import Normalize
from matplotlib.colors import normalize # for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from .ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
""" If rcParams['backend_fallback'] is true, check to see if the
current backend is compatible with the current running event
loop, and if not switches to a compatible one.
"""
backend = rcParams['backend']
if not rcParams['backend_fallback'] or \
backend not in _interactive_bk:
return
is_agg_backend = rcParams['backend'].endswith('Agg')
if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
import wx
if wx.App.IsMainLoopRunning():
rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
import PyQt4.QtGui
if not PyQt4.QtGui.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt4Agg'
elif 'PyQt5.QtCore' in sys.modules and not backend == 'Qt5Agg':
import PyQt5.QtWidgets
if not PyQt5.QtWidgets.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt5Agg'
elif ('gtk' in sys.modules
and backend not in ('GTK', 'GTKAgg', 'GTKCairo')
and 'gi.repository.GObject' not in sys.modules):
import gobject
if gobject.MainLoop().is_running():
rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
# import Tkinter
pass # what if anything do we need to do for tkinter?
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
_backend_mod, new_figure_manager, draw_if_interactive, _show = pylab_setup()
@docstring.copy_dedent(Artist.findobj)
def findobj(o=None, match=None, include_self=True):
if o is None:
o = gcf()
return o.findobj(match, include_self=include_self)
def switch_backend(newbackend):
"""
Switch the default backend. This feature is **experimental**, and
is only expected to work switching to an image backend. e.g., if
you have a bunch of PostScript scripts that you want to run from
an interactive ipython session, you may want to switch to the PS
backend before running them to avoid having a bunch of GUI windows
popup. If you try to interactively switch from one GUI backend to
another, you will explode.
Calling this command will close all open windows.
"""
close('all')
global _backend_mod, new_figure_manager, draw_if_interactive, _show
matplotlib.use(newbackend, warn=False, force=True)
from matplotlib.backends import pylab_setup
_backend_mod, new_figure_manager, draw_if_interactive, _show = pylab_setup()
def show(*args, **kw):
"""
Display a figure.
When running in ipython with its pylab mode, display all
figures and return to the ipython prompt.
In non-interactive mode, display all figures and block until
the figures have been closed; in interactive mode it has no
effect unless figures were created prior to a change from
non-interactive to interactive mode (not recommended). In
that case it displays the figures but does not block.
A single experimental keyword argument, *block*, may be
set to True or False to override the blocking behavior
described above.
"""
global _show
return _show(*args, **kw)
def isinteractive():
"""
Return status of interactive mode.
"""
return matplotlib.is_interactive()
def ioff():
'Turn interactive mode off.'
matplotlib.interactive(False)
def ion():
'Turn interactive mode on.'
matplotlib.interactive(True)
def pause(interval):
"""
Pause for *interval* seconds.
If there is an active figure it will be updated and displayed,
and the GUI event loop will run during the pause.
If there is no active figure, or if a non-interactive backend
is in use, this executes time.sleep(interval).
This can be used for crude animation. For more complex
animation, see :mod:`matplotlib.animation`.
This function is experimental; its behavior may be changed
or extended in a future release.
"""
backend = rcParams['backend']
if backend in _interactive_bk:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
canvas = figManager.canvas
canvas.draw()
show(block=False)
canvas.start_event_loop(interval)
return
# No on-screen figure is active, so sleep() is all we need.
import time
time.sleep(interval)
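# A crude-animation sketch built on pause(), as described above (illustrative
# only; it simply reuses plot() and the Line2D artist it returns).
def _pause_animation_sketch(n_frames=10):
    x = np.linspace(0, 2 * np.pi, 100)
    line, = plot(x, np.sin(x))
    for i in range(n_frames):
        line.set_data(x, np.sin(x + 0.3 * i))    # update the existing artist
        pause(0.05)                              # redraw and run the event loop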
@docstring.copy_dedent(matplotlib.rc)
def rc(*args, **kwargs):
matplotlib.rc(*args, **kwargs)
@docstring.copy_dedent(matplotlib.rc_context)
def rc_context(rc=None, fname=None):
return matplotlib.rc_context(rc, fname)
@docstring.copy_dedent(matplotlib.rcdefaults)
def rcdefaults():
matplotlib.rcdefaults()
draw_if_interactive()
# The current "image" (ScalarMappable) is retrieved or set
# only via the pyplot interface using the following two
# functions:
def gci():
"""
Get the current colorable artist. Specifically, returns the
current :class:`~matplotlib.cm.ScalarMappable` instance (image or
patch collection), or *None* if no images or patch collections
have been defined. The commands :func:`~matplotlib.pyplot.imshow`
and :func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances. The
current image is an attribute of the current axes, or the nearest
earlier axes in the current figure that contains an image.
"""
return gcf()._gci()
def sci(im):
"""
Set the current image. This image will be the target of colormap
commands like :func:`~matplotlib.pyplot.jet`,
:func:`~matplotlib.pyplot.hot` or
:func:`~matplotlib.pyplot.clim`). The current image is an
attribute of the current axes.
"""
gca()._sci(im)
## Any Artist ##
# (getp is simply imported)
@docstring.copy(_setp)
def setp(*args, **kwargs):
ret = _setp(*args, **kwargs)
draw_if_interactive()
return ret
def xkcd(scale=1, length=100, randomness=2):
"""
Turns on `xkcd <http://xkcd.com/>`_ sketch-style drawing mode.
This will only have effect on things drawn after this function is
called.
For best results, the "Humor Sans" font should be installed: it is
not included with matplotlib.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source line.
length : float, optional
The length of the wiggle along the line.
randomness : float, optional
The scale factor by which the length is shrunken or expanded.
Notes
-----
This function works by a number of rcParams, so it will probably
override others you have set before.
If you want the effects of this function to be temporary, it can
be used as a context manager, for example::
with plt.xkcd():
# This figure will be in XKCD-style
fig1 = plt.figure()
# ...
# This figure will be in regular style
fig2 = plt.figure()
"""
if rcParams['text.usetex']:
raise RuntimeError(
"xkcd mode is not compatible with text.usetex = True")
from matplotlib import patheffects
context = rc_context()
try:
rcParams['font.family'] = ['Humor Sans', 'Comic Sans MS']
rcParams['font.size'] = 14.0
rcParams['path.sketch'] = (scale, length, randomness)
rcParams['path.effects'] = [
patheffects.withStroke(linewidth=4, foreground="w")]
rcParams['axes.linewidth'] = 1.5
rcParams['lines.linewidth'] = 2.0
rcParams['figure.facecolor'] = 'white'
rcParams['grid.linewidth'] = 0.0
rcParams['axes.unicode_minus'] = False
rcParams['axes.color_cycle'] = ['b', 'r', 'c', 'm']
rcParams['xtick.major.size'] = 8
rcParams['xtick.major.width'] = 3
rcParams['ytick.major.size'] = 8
rcParams['ytick.major.width'] = 3
except:
context.__exit__(*sys.exc_info())
raise
return context
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize=None, # defaults to rc figure.figsize
dpi=None, # defaults to rc figure.dpi
facecolor=None, # defaults to rc figure.facecolor
edgecolor=None, # defaults to rc figure.edgecolor
frameon=True,
FigureClass=Figure,
**kwargs
):
"""
Creates a new figure.
Parameters
----------
num : integer or string, optional, default: none
If not provided, a new figure will be created, and the figure number
will be incremented. The figure objects holds this number in a `number`
attribute.
If num is provided, and a figure with this id already exists, make
it active, and returns a reference to it. If this figure does not
exists, create it and returns it.
If num is a string, the window title will be set to this figure's
`num`.
figsize : tuple of integers, optional, default: None
width, height in inches. If not provided, defaults to rc
figure.figsize.
dpi : integer, optional, default: None
resolution of the figure. If not provided, defaults to rc figure.dpi.
facecolor :
the background color. If not provided, defaults to rc figure.facecolor
edgecolor :
the border color. If not provided, defaults to rc figure.edgecolor
Returns
-------
figure : Figure
The Figure instance returned will also be passed to new_figure_manager
in the backends, which allows to hook custom Figure classes into the
pylab interface. Additional kwargs will be passed to the figure init
function.
Notes
-----
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
rcParams defines the default values, which can be modified in the
matplotlibrc file
"""
if figsize is None:
figsize = rcParams['figure.figsize']
if dpi is None:
dpi = rcParams['figure.dpi']
if facecolor is None:
facecolor = rcParams['figure.facecolor']
if edgecolor is None:
edgecolor = rcParams['figure.edgecolor']
allnums = get_fignums()
next_num = max(allnums) + 1 if allnums else 1
figLabel = ''
if num is None:
num = next_num
elif is_string_like(num):
figLabel = num
allLabels = get_figlabels()
if figLabel not in allLabels:
if figLabel == 'all':
warnings.warn("close('all') closes all existing figures")
num = next_num
else:
inum = allLabels.index(figLabel)
num = allnums[inum]
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
max_open_warning = rcParams['figure.max_open_warning']
if (max_open_warning >= 1 and
len(allnums) >= max_open_warning):
warnings.warn(
"More than %d figures have been opened. Figures "
"created through the pyplot interface "
"(`matplotlib.pyplot.figure`) are retained until "
"explicitly closed and may consume too much memory. "
"(To control this warning, see the rcParam "
"`figure.max_open_warning`)." %
max_open_warning, RuntimeWarning)
if get_backend().lower() == 'ps':
dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
if figLabel:
figManager.set_window_title(figLabel)
figManager.canvas.figure.set_label(figLabel)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
figManager.canvas.figure.number = num
draw_if_interactive()
return figManager.canvas.figure
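# A small usage sketch for figure(): integer nums are reused, string nums
# become the figure label and window title (illustrative only).
def _figure_usage_sketch():
    fig1 = figure(1, figsize=(4, 3))     # create (or activate) figure number 1
    fig_again = figure(1)                # the same Figure instance comes back
    fig_named = figure('diagnostics')    # label set and used as window title
    return fig1 is fig_again, fig_named.get_label()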
def gcf():
"Get a reference to the current figure."
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
fignum_exists = _pylab_helpers.Gcf.has_fignum
def get_fignums():
"""Return a list of existing figure numbers."""
fignums = list(six.iterkeys(_pylab_helpers.Gcf.figs))
fignums.sort()
return fignums
def get_figlabels():
"Return a list of existing figure labels."
figManagers = _pylab_helpers.Gcf.get_all_fig_managers()
figManagers.sort(key=lambda m: m.num)
return [m.canvas.figure.get_label() for m in figManagers]
def get_current_fig_manager():
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
@docstring.copy_dedent(FigureCanvasBase.mpl_connect)
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
@docstring.copy_dedent(FigureCanvasBase.mpl_disconnect)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
def close(*args):
"""
Close a figure window.
``close()`` by itself closes the current figure
``close(h)`` where *h* is a :class:`Figure` instance, closes that figure
``close(num)`` closes figure number *num*
``close(name)`` where *name* is a string, closes figure with that label
``close('all')`` closes all the figure windows
"""
if len(args) == 0:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
return
else:
_pylab_helpers.Gcf.destroy(figManager.num)
elif len(args) == 1:
arg = args[0]
if arg == 'all':
_pylab_helpers.Gcf.destroy_all()
elif isinstance(arg, six.integer_types):
_pylab_helpers.Gcf.destroy(arg)
elif hasattr(arg, 'int'):
# if we are dealing with a type UUID, we
# can use its integer representation
_pylab_helpers.Gcf.destroy(arg.int)
elif is_string_like(arg):
allLabels = get_figlabels()
if arg in allLabels:
num = get_fignums()[allLabels.index(arg)]
_pylab_helpers.Gcf.destroy(num)
elif isinstance(arg, Figure):
_pylab_helpers.Gcf.destroy_fig(arg)
else:
raise TypeError('Unrecognized argument type %s to close' % type(arg))
else:
raise TypeError('close takes 0 or 1 arguments')
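# Hedged usage sketch (not part of the original module): the close() call
# variants described in the docstring above, assuming the usual plt alias.
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure('scratch')
#   plt.close()           # closes the current figure
#   plt.close(fig)        # closes by Figure instance (no-op if already closed)
#   plt.close('scratch')  # closes by label
#   plt.close('all')      # closes every open figure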
def clf():
"""
Clear the current figure.
"""
gcf().clf()
draw_if_interactive()
def draw():
"""
Redraw the current figure.
This is used in interactive mode to update a figure that
has been altered using one or more plot object method calls;
it is not needed if figure modification is done entirely
with pyplot functions, if a sequence of modifications ends
with a pyplot function, or if matplotlib is in non-interactive
mode and the sequence of modifications ends with :func:`show` or
:func:`savefig`.
A more object-oriented alternative, given any
:class:`~matplotlib.figure.Figure` instance, :attr:`fig`, that
was created using a :mod:`~matplotlib.pyplot` function, is::
fig.canvas.draw()
"""
get_current_fig_manager().canvas.draw()
@docstring.copy_dedent(Figure.savefig)
def savefig(*args, **kwargs):
fig = gcf()
res = fig.savefig(*args, **kwargs)
draw() # need this if 'transparent=True' to reset colors
return res
@docstring.copy_dedent(Figure.ginput)
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
@docstring.copy_dedent(Figure.waitforbuttonpress)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
# Putting things in figures
@docstring.copy_dedent(Figure.text)
def figtext(*args, **kwargs):
ret = gcf().text(*args, **kwargs)
draw_if_interactive()
return ret
@docstring.copy_dedent(Figure.suptitle)
def suptitle(*args, **kwargs):
ret = gcf().suptitle(*args, **kwargs)
draw_if_interactive()
return ret
@docstring.Appender("Addition kwargs: hold = [True|False] overrides default hold state", "\n")
@docstring.copy_dedent(Figure.figimage)
def figimage(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
ret = gcf().figimage(*args, **kwargs)
draw_if_interactive()
#sci(ret) # JDH figimage should not set current image -- it is not mappable, etc
return ret
def figlegend(handles, labels, loc, **kwargs):
"""
Place a legend in the figure.
*labels*
a sequence of strings
*handles*
a sequence of :class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances
*loc*
can be a string or an integer specifying the legend
location
A :class:`matplotlib.legend.Legend` instance is returned.
Example::
figlegend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right' )
.. seealso::
:func:`~matplotlib.pyplot.legend`
"""
l = gcf().legend(handles, labels, loc, **kwargs)
draw_if_interactive()
return l
## Figure and Axes hybrid ##
def hold(b=None):
"""
Set the hold state. If *b* is None (default), toggle the
hold state, else set the hold state to boolean value *b*::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
When *hold* is *True*, subsequent plot commands will be added to
the current axes. When *hold* is *False*, the current axes and
figure will be cleared on the next plot command.
"""
fig = gcf()
ax = fig.gca()
fig.hold(b)
ax.hold(b)
# b=None toggles the hold state, so let's get the current hold
# state; but should pyplot hold toggle the rc setting - me thinks
# not
b = ax.ishold()
rc('axes', hold=b)
def ishold():
"""
Return the hold status of the current axes.
"""
return gca().ishold()
def over(func, *args, **kwargs):
"""
Call a function with hold(True).
Calls::
func(*args, **kwargs)
with ``hold(True)`` and then restores the hold state.
"""
h = ishold()
hold(True)
func(*args, **kwargs)
hold(h)
## Axes ##
def axes(*args, **kwargs):
"""
Add an axes to the figure.
The axes is added at position *rect* specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
======= ============== ==============================================
kwarg Accepts Description
======= ============== ==============================================
axisbg color the axes background color
frameon [True|False] display the frame?
sharex otherax current axes shares xaxis attribute
with otherax
sharey otherax current axes shares yaxis attribute
with otherax
polar [True|False] use a polar axes?
aspect [str | num] ['equal', 'auto'] or a number. If a number
the ratio of x-unit/y-unit in screen-space.
Also see
:meth:`~matplotlib.axes.Axes.set_aspect`.
======= ============== ==============================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
"""
nargs = len(args)
if len(args) == 0:
return subplot(111, **kwargs)
if nargs > 1:
raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
draw_if_interactive()
return a
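# Hedged usage sketch (not part of the original module): placing axes at an
# explicit rect, as described above. The rect values are illustrative only.
#
#   import matplotlib.pyplot as plt
#   ax_main = plt.axes([0.1, 0.1, 0.8, 0.8])      # left, bottom, width, height
#   ax_inset = plt.axes([0.65, 0.65, 0.2, 0.2])   # small inset in the corner
#   ax_inset.plot([0, 1], [0, 1])
#   plt.sca(ax_main)                              # make the large axes current again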
def delaxes(*args):
"""
Remove an axes from the current figure. If *ax*
doesn't exist, an error will be raised.
``delaxes()``: delete the current axes
"""
if not len(args):
ax = gca()
else:
ax = args[0]
ret = gcf().delaxes(ax)
draw_if_interactive()
return ret
def sca(ax):
"""
Set the current Axes instance to *ax*.
The current Figure is updated to the parent of *ax*.
"""
managers = _pylab_helpers.Gcf.get_all_fig_managers()
for m in managers:
if ax in m.canvas.figure.axes:
_pylab_helpers.Gcf.set_active(m)
m.canvas.figure.sca(ax)
return
raise ValueError("Axes instance argument was not found in a figure.")
def gca(**kwargs):
"""
Get the current :class:`~matplotlib.axes.Axes` instance on the
current figure matching the given keyword args, or create one.
Examples
--------
To get the current polar axes on the current figure::
plt.gca(projection='polar')
If the current axes doesn't exist, or isn't a polar one, the appropriate
axes will be created and then returned.
See Also
--------
matplotlib.figure.Figure.gca : The figure's gca method.
"""
ax = gcf().gca(**kwargs)
return ax
# More ways of creating axes:
def subplot(*args, **kwargs):
"""
Return a subplot axes positioned by the given grid definition.
Typical call signature::
subplot(nrows, ncols, plot_number)
Where *nrows* and *ncols* are used to notionally split the figure
into ``nrows * ncols`` sub-axes, and *plot_number* is used to identify
the particular subplot that this function is to create within the notional
grid. *plot_number* starts at 1, increments across rows first and has a
maximum of ``nrows * ncols``.
In the case when *nrows*, *ncols* and *plot_number* are all less than 10,
a convenience exists, such that a 3-digit number can be given instead,
where the hundreds represent *nrows*, the tens represent *ncols* and the
units represent *plot_number*. For instance::
subplot(211)
produces a subaxes in a figure which represents the top plot (i.e. the
first) in a 2 row by 1 column notional grid (no grid actually exists,
but conceptually this is how the returned subplot has been positioned).
.. note::
Creating a new subplot with a position which is entirely inside a
pre-existing axes will trigger the larger axes to be deleted::
import matplotlib.pyplot as plt
# plot a line, implicitly creating a subplot(111)
plt.plot([1,2,3])
# now create a subplot which represents the top plot of a grid
# with 2 rows and 1 column. Since this subplot will overlap the
# first, the plot (and its axes) previously created, will be removed
plt.subplot(211)
plt.plot(range(12))
plt.subplot(212, axisbg='y') # creates 2nd subplot with yellow background
If you do not want this behavior, use the
:meth:`~matplotlib.figure.Figure.add_subplot` method or the
:func:`~matplotlib.pyplot.axes` function instead.
Keyword arguments:
*axisbg*:
The background color of the subplot, which can be any valid
color specifier. See :mod:`matplotlib.colors` for more
information.
*polar*:
A boolean flag indicating whether the subplot should use
a polar projection. Defaults to *False*.
*projection*:
A string giving the name of a custom projection to be used
for the subplot. This projection must have been previously
registered. See :mod:`matplotlib.projections`.
.. seealso::
:func:`~matplotlib.pyplot.axes`
For additional information on :func:`axes` and
:func:`subplot` keyword arguments.
:file:`examples/pie_and_polar_charts/polar_scatter_demo.py`
For an example
**Example:**
.. plot:: mpl_examples/subplots_axes_and_figures/subplot_demo.py
"""
# if subplot called without arguments, create subplot(1,1,1)
if len(args)==0:
args=(1,1,1)
# This check was added because it is very easy to type
# subplot(1, 2, False) when subplots(1, 2, False) was intended
# (sharex=False, that is). In most cases, no error will
# ever occur, but mysterious behavior can result because what was
# intended to be the sharex argument is instead treated as a
# subplot index for subplot()
if len(args) >= 3 and isinstance(args[2], bool) :
warnings.warn("The subplot index argument to subplot() appears"
" to be a boolean. Did you intend to use subplots()?")
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
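# Hedged usage sketch (not part of the original module): the 3-digit and
# 3-argument forms of subplot() described above, assuming the usual plt alias.
#
#   import matplotlib.pyplot as plt
#   ax_top = plt.subplot(211)         # 2 rows, 1 column, first cell
#   ax_top.plot([1, 2, 3])
#   ax_bottom = plt.subplot(2, 1, 2)  # same grid, second cell
#   ax_bottom.plot(range(12))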
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, gridspec_kw=None, **fig_kw):
"""
Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
*nrows* : int
Number of rows of the subplot grid. Defaults to 1.
*ncols* : int
Number of columns of the subplot grid. Defaults to 1.
*sharex* : string or bool
If *True*, the X axis will be shared amongst all subplots. If
*True* and you have multiple rows, the x tick labels on all but
the last row of plots will have visible set to *False*.
If a string, it must be one of "row", "col", "all", or "none".
"all" has the same effect as *True*, "none" has the same effect
as *False*.
If "row", each subplot row will share an X axis.
If "col", each subplot column will share an X axis and the x tick
labels on all but the last row will have visible set to *False*.
*sharey* : string or bool
If *True*, the Y axis will be shared amongst all subplots. If
*True* and you have multiple columns, the y tick labels on all but
the first column of plots will have visible set to *False*.
If a string, it must be one of "row", "col", "all", or "none".
"all" has the same effect as *True*, "none" has the same effect
as *False*.
If "row", each subplot row will share a Y axis and the y tick
labels on all but the first column will have visible set to *False*.
If "col", each subplot column will share a Y axis.
*squeeze* : bool
If *True*, extra dimensions are squeezed out from the
returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the
resulting single Axes object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy
object array of Axes objects.
- NxM subplots with N>1 and M>1 are returned as a 2-d object
array.
If *False*, no squeezing at all is done: the returned axis
object is always a 2-d array containing Axes instances, even if it
ends up being 1x1.
*subplot_kw* : dict
Dict with keywords passed to the
:meth:`~matplotlib.figure.Figure.add_subplot` call used to
create each subplot.
*gridspec_kw* : dict
Dict with keywords passed to the
:class:`~matplotlib.gridspec.GridSpec` constructor used to create
the grid the subplots are placed on.
*fig_kw* : dict
Dict with keywords passed to the :func:`figure` call. Note that all
keywords not recognized above will be automatically included here.
Returns:
fig, ax : tuple
- *fig* is the :class:`matplotlib.figure.Figure` object
- *ax* can be either a single axis object or an array of axis
objects if more than one subplot was created. The dimensions
of the resulting array can be controlled with the squeeze
keyword, see above.
Examples::
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
# Share a X axis with each column of subplots
plt.subplots(2, 2, sharex='col')
# Share a Y axis with each row of subplots
plt.subplots(2, 2, sharey='row')
# Share a X and Y axis with all subplots
plt.subplots(2, 2, sharex='all', sharey='all')
# same as
plt.subplots(2, 2, sharex=True, sharey=True)
"""
# for backwards compatibility
if isinstance(sharex, bool):
if sharex:
sharex = "all"
else:
sharex = "none"
if isinstance(sharey, bool):
if sharey:
sharey = "all"
else:
sharey = "none"
share_values = ["all", "row", "col", "none"]
if sharex not in share_values:
# This check was added because it is very easy to type subplots(1, 2, 1)
# when subplot(1, 2, 1) was intended. In most cases, no error will
# ever occur, but mysterious behavior will result because what was
# intended to be the subplot index is instead treated as a bool for
# sharex.
if isinstance(sharex, int):
warnings.warn("sharex argument to subplots() was an integer."
" Did you intend to use subplot() (without 's')?")
raise ValueError("sharex [%s] must be one of %s" % \
(sharex, share_values))
if sharey not in share_values:
raise ValueError("sharey [%s] must be one of %s" % \
(sharey, share_values))
if subplot_kw is None:
subplot_kw = {}
if gridspec_kw is None:
gridspec_kw = {}
fig = figure(**fig_kw)
gs = GridSpec(nrows, ncols, **gridspec_kw)
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then reshape it to the
# final (nrows, ncols) shape at the end.
nplots = nrows*ncols
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(gs[0, 0], **subplot_kw)
#if sharex:
# subplot_kw['sharex'] = ax0
#if sharey:
# subplot_kw['sharey'] = ax0
axarr[0] = ax0
r, c = np.mgrid[:nrows, :ncols]
r = r.flatten() * ncols
c = c.flatten()
lookup = {
"none": np.arange(nplots),
"all": np.zeros(nplots, dtype=int),
"row": r,
"col": c,
}
sxs = lookup[sharex]
sys = lookup[sharey]
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
if sxs[i] == i:
subplot_kw['sharex'] = None
else:
subplot_kw['sharex'] = axarr[sxs[i]]
if sys[i] == i:
subplot_kw['sharey'] = None
else:
subplot_kw['sharey'] = axarr[sys[i]]
axarr[i] = fig.add_subplot(gs[i // ncols, i % ncols], **subplot_kw)
# returned axis array will always be 2-d, even if nrows=ncols=1
axarr = axarr.reshape(nrows, ncols)
# turn off redundant tick labeling
if sharex in ["col", "all"] and nrows > 1:
#if sharex and nrows>1:
# turn off all but the bottom row
for ax in axarr[:-1, :].flat:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
if sharey in ["row", "all"] and ncols > 1:
#if sharey and ncols>1:
# turn off all but the first column
for ax in axarr[:, 1:].flat:
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots==1:
ret = fig, axarr[0,0]
else:
ret = fig, axarr.squeeze()
else:
# returned axis array will always be 2-d, even if nrows=ncols=1
ret = fig, axarr.reshape(nrows, ncols)
return ret
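# Hedged usage sketch (not part of the original module): creating a shared-axis
# grid with subplots(), mirroring the docstring examples above.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.linspace(0, 2 * np.pi, 400)
#   fig, axarr = plt.subplots(2, 2, sharex='col', sharey='row')
#   for ax in axarr.flat:
#       ax.plot(x, np.sin(x ** 2))
#   fig, ax = plt.subplots()   # squeeze=True: a single Axes, not an array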
def subplot2grid(shape, loc, rowspan=1, colspan=1, **kwargs):
"""
Create a subplot in a grid. The grid is specified by *shape*, at
location of *loc*, spanning *rowspan*, *colspan* cells in each
direction. The index for loc is 0-based. ::
subplot2grid(shape, loc, rowspan=1, colspan=1)
is identical to ::
gridspec=GridSpec(shape[0], shape[1])
subplotspec=gridspec.new_subplotspec(loc, rowspan, colspan)
subplot(subplotspec)
"""
fig = gcf()
s1, s2 = shape
subplotspec = GridSpec(s1, s2).new_subplotspec(loc,
rowspan=rowspan,
colspan=colspan)
a = fig.add_subplot(subplotspec, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
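# Hedged usage sketch (not part of the original module): spanning cells of a
# 3x3 notional grid with subplot2grid(), as described above.
#
#   import matplotlib.pyplot as plt
#   ax_wide = plt.subplot2grid((3, 3), (0, 0), colspan=3)   # full-width top row
#   ax_tall = plt.subplot2grid((3, 3), (1, 0), rowspan=2)   # left column, two rows
#   ax_rest = plt.subplot2grid((3, 3), (1, 1), rowspan=2, colspan=2)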
def twinx(ax=None):
"""
Make a second axes that shares the *x*-axis. The new axes will
overlay *ax* (or the current axes if *ax* is *None*). The ticks
for *ax2* will be placed on the right, and the *ax2* instance is
returned.
.. seealso::
:file:`examples/api_examples/two_scales.py`
For an example
"""
if ax is None:
ax=gca()
ax1 = ax.twinx()
draw_if_interactive()
return ax1
def twiny(ax=None):
"""
Make a second axes that shares the *y*-axis. The new axis will
overlay *ax* (or the current axes if *ax* is *None*). The ticks
for *ax2* will be placed on the top, and the *ax2* instance is
returned.
"""
if ax is None:
ax=gca()
ax1 = ax.twiny()
draw_if_interactive()
return ax1
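# Hedged usage sketch (not part of the original module): overlaying a second
# y scale with twinx(), as described above. Data values are illustrative.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   t = np.linspace(0, 10, 200)
#   ax_left = plt.gca()
#   ax_left.plot(t, np.exp(t), 'b-')
#   ax_right = plt.twinx()            # shares the x-axis, ticks on the right
#   ax_right.plot(t, np.sin(t), 'r-')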
def subplots_adjust(*args, **kwargs):
"""
Tune the subplot layout.
call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
The parameter meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(*args, **kwargs)
draw_if_interactive()
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for a figure.
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure==targetfig: break
else: raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Automatically adjust subplot parameters to give specified padding.
Parameters:
pad : float
padding between the figure edge and the edges of subplots, as a fraction of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
rect : tuple (left, bottom, right, top), optional
a rectangle in normalized figure coordinates into which the whole
subplots area (including labels) will fit. Default is (0, 0, 1, 1).
"""
fig = gcf()
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
draw_if_interactive()
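# Hedged usage sketch (not part of the original module): trimming inter-subplot
# whitespace, either manually with subplots_adjust() or automatically with
# tight_layout(). The padding values are illustrative.
#
#   import matplotlib.pyplot as plt
#   fig, axarr = plt.subplots(2, 2)
#   plt.subplots_adjust(wspace=0.35, hspace=0.35)   # manual spacing
#   plt.tight_layout(pad=0.5)                       # or let matplotlib compute it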
def box(on=None):
"""
Turn the axes box on or off. *on* may be a boolean or a string,
'on' or 'off'.
If *on* is *None*, toggle state.
"""
ax = gca()
on = _string_to_bool(on)
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
draw_if_interactive()
def title(s, *args, **kwargs):
"""
Set a title of the current axes.
Set one of the three available axes titles. The available titles are
positioned above the axes in the center, flush with the left edge,
and flush with the right edge.
.. seealso::
See :func:`~matplotlib.pyplot.text` for adding text
to the current axes
Parameters
----------
label : str
Text to use for the title
fontdict : dict
A dictionary controlling the appearance of the title text,
the default `fontdict` is:
{'fontsize': rcParams['axes.titlesize'],
'fontweight' : rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': loc}
loc : {'center', 'left', 'right'}, str, optional
Which title to set, defaults to 'center'
Returns
-------
text : :class:`~matplotlib.text.Text`
The matplotlib text instance representing the title
Other parameters
----------------
kwargs : text properties
Other keyword arguments are text properties, see
:class:`~matplotlib.text.Text` for a list of valid text
properties.
"""
l = gca().set_title(s, *args, **kwargs)
draw_if_interactive()
return l
## Axis ##
def axis(*v, **kwargs):
"""
Convenience method to get or set axis properties.
Calling with no arguments::
>>> axis()
returns the current axes limits ``[xmin, xmax, ymin, ymax]``.::
>>> axis(v)
sets the min and max of the x and y axes, with
``v = [xmin, xmax, ymin, ymax]``.::
>>> axis('off')
turns off the axis lines and labels.::
>>> axis('equal')
changes limits of *x* or *y* axis so that equal increments of *x*
and *y* have the same length; a circle is circular.::
>>> axis('scaled')
achieves the same result by changing the dimensions of the plot box instead
of the axis data limits.::
>>> axis('tight')
changes *x* and *y* axis limits such that all data is shown. If
all data is already shown, it will move it to the center of the
figure without modifying (*xmax* - *xmin*) or (*ymax* -
*ymin*). Note this is slightly different than in MATLAB.::
>>> axis('image')
is 'scaled' with the axis limits equal to the data limits.::
>>> axis('auto')
and::
>>> axis('normal')
are deprecated. They restore default behavior; axis limits are automatically
scaled to make the data fit comfortably within the plot box.
if ``len(*v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
as kwargs selectively to alter just those limits without changing
the others.
The xmin, xmax, ymin, ymax tuple is returned
.. seealso::
:func:`xlim`, :func:`ylim`
For setting the x- and y-limits individually.
"""
ax = gca()
v = ax.axis(*v, **kwargs)
draw_if_interactive()
return v
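# Hedged usage sketch (not part of the original module): the most common
# axis() forms described above.
#
#   import matplotlib.pyplot as plt
#   plt.plot([0, 1, 2], [0, 1, 4])
#   xmin, xmax, ymin, ymax = plt.axis()   # query current limits
#   plt.axis([0, 2, 0, 5])                # set all four limits at once
#   plt.axis('equal')                     # equal data increments per unit length
#   plt.axis('off')                       # hide axis lines and labels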
def xlabel(s, *args, **kwargs):
"""
Set the *x* axis label of the current axis.
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'top',
'horizontalalignment' : 'center'
}
.. seealso::
:func:`~matplotlib.pyplot.text`
For information on how override and the optional args work
"""
l = gca().set_xlabel(s, *args, **kwargs)
draw_if_interactive()
return l
def ylabel(s, *args, **kwargs):
"""
Set the *y* axis label of the current axis.
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'center',
'horizontalalignment' : 'right',
'rotation' : 'vertical' }
.. seealso::
:func:`~matplotlib.pyplot.text`
For information on how override and the optional args
work.
"""
l = gca().set_ylabel(s, *args, **kwargs)
draw_if_interactive()
return l
def xlim(*args, **kwargs):
"""
Get or set the *x* limits of the current axes.
::
xmin, xmax = xlim() # return the current xlim
xlim( (xmin, xmax) ) # set the xlim to xmin, xmax
xlim( xmin, xmax ) # set the xlim to xmin, xmax
If you do not specify args, you can pass the xmin and xmax as
kwargs, e.g.::
xlim(xmax=3) # adjust the max leaving min unchanged
xlim(xmin=1) # adjust the min leaving max unchanged
Setting limits turns autoscaling off for the x-axis.
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
if not args and not kwargs:
return ax.get_xlim()
ret = ax.set_xlim(*args, **kwargs)
draw_if_interactive()
return ret
def ylim(*args, **kwargs):
"""
Get or set the *y*-limits of the current axes.
::
ymin, ymax = ylim() # return the current ylim
ylim( (ymin, ymax) ) # set the ylim to ymin, ymax
ylim( ymin, ymax ) # set the ylim to ymin, ymax
If you do not specify args, you can pass the *ymin* and *ymax* as
kwargs, e.g.::
ylim(ymax=3) # adjust the max leaving min unchanged
ylim(ymin=1) # adjust the min leaving max unchanged
Setting limits turns autoscaling off for the y-axis.
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
if not args and not kwargs:
return ax.get_ylim()
ret = ax.set_ylim(*args, **kwargs)
draw_if_interactive()
return ret
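# Hedged usage sketch (not part of the original module): the getter/setter
# behaviour of xlim()/ylim() described above.
#
#   import matplotlib.pyplot as plt
#   plt.plot(range(10))
#   left, right = plt.xlim()   # query
#   plt.xlim(0, 5)             # set both ends
#   plt.ylim(ymin=-1)          # adjust only the minimum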
@docstring.dedent_interpd
def xscale(*args, **kwargs):
"""
Set the scaling of the *x*-axis.
call signature::
xscale(scale, **kwargs)
The available scales are: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ax.set_xscale(*args, **kwargs)
draw_if_interactive()
@docstring.dedent_interpd
def yscale(*args, **kwargs):
"""
Set the scaling of the *y*-axis.
call signature::
yscale(scale, **kwargs)
The available scales are: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ax.set_yscale(*args, **kwargs)
draw_if_interactive()
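# Hedged usage sketch (not part of the original module): switching axis scales
# with xscale()/yscale(); 'log' is a standard registered scale.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   x = np.linspace(1, 1000, 500)
#   plt.plot(x, x ** 2)
#   plt.xscale('log')
#   plt.yscale('log')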
def xticks(*args, **kwargs):
"""
Get or set the *x*-limits of the current tick locations and labels.
::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = xticks()
# set the locations of the xticks
xticks( arange(6) )
# set the locations and labels of the xticks
xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties. For example, to rotate long labels::
xticks( arange(12), calendar.month_name[1:13], rotation=17 )
"""
ax = gca()
if len(args)==0:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif len(args)==1:
locs = ax.set_xticks(args[0])
labels = ax.get_xticklabels()
elif len(args)==2:
locs = ax.set_xticks(args[0])
labels = ax.set_xticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to xticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
"""
Get or set the *y*-limits of the current tick locations and labels.
::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = yticks()
# set the locations of the yticks
yticks( arange(6) )
# set the locations and labels of the yticks
yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties. For example, to rotate long labels::
yticks( arange(12), calendar.month_name[1:13], rotation=45 )
"""
ax = gca()
if len(args)==0:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif len(args)==1:
locs = ax.set_yticks(args[0])
labels = ax.get_yticklabels()
elif len(args)==2:
locs = ax.set_yticks(args[0])
labels = ax.set_yticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to yticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return ( locs,
silent_list('Text yticklabel', labels)
)
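# Hedged usage sketch (not part of the original module): querying and setting
# tick locations/labels with xticks()/yticks(), as in the docstrings above.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   plt.plot(np.arange(5), np.arange(5) ** 2)
#   locs, labels = plt.xticks()   # query
#   plt.xticks(np.arange(5), ('a', 'b', 'c', 'd', 'e'), rotation=30)
#   plt.yticks(np.arange(0, 20, 5))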
def minorticks_on():
"""
Display minor ticks on the current plot.
Displaying minor ticks reduces performance; turn them off using
minorticks_off() if drawing speed is a problem.
"""
gca().minorticks_on()
draw_if_interactive()
def minorticks_off():
"""
Remove minor ticks from the current plot.
"""
gca().minorticks_off()
draw_if_interactive()
def rgrids(*args, **kwargs):
"""
Get or set the radial gridlines on a polar plot.
call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
When called with no arguments, :func:`rgrids` simply returns the
tuple (*lines*, *labels*), where *lines* is an array of radial
gridlines (:class:`~matplotlib.lines.Line2D` instances) and
*labels* is an array of tick labels
(:class:`~matplotlib.text.Text` instances). When called with
arguments, the labels will appear at the specified radial
distances and angles.
*labels*, if not *None*, is a len(*radii*) list of strings of the
labels to use at each radial gridline.
If *labels* is None, the rformatter will be used.
Examples::
# set the locations of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args)==0:
lines = ax.yaxis.get_gridlines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
draw_if_interactive()
return ( silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels) )
def thetagrids(*args, **kwargs):
"""
Get or set the theta locations of the gridlines in a polar plot.
If no arguments are passed, return a tuple (*lines*, *labels*)
where *lines* is an array of radial gridlines
(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
array of tick labels (:class:`~matplotlib.text.Text` instances)::
lines, labels = thetagrids()
Otherwise the syntax is::
lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
set the angles at which to place the theta grids (these gridlines
are equal along the theta dimension).
*angles* is in degrees.
*labels*, if not *None*, is a len(angles) list of strings of the
labels to use at each angle.
If *labels* is *None*, the labels will be ``fmt%angle``.
*frac* is the fraction of the polar axes radius at which to place
the label (1 is the edge). e.g., 1.05 is outside the axes and 0.95
is inside the axes.
Return value is a tuple (*lines*, *labels*):
- *lines* are :class:`~matplotlib.lines.Line2D` instances
- *labels* are :class:`~matplotlib.text.Text` instances.
Note that on input, the *labels* argument is a list of strings,
and on output it is a list of :class:`~matplotlib.text.Text`
instances.
Examples::
# set the locations of the theta gridlines
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the theta gridlines
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('thetagrids only defined for polar axes')
if len(args)==0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
draw_if_interactive()
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels)
)
## Plotting Info ##
def plotting():
pass
def get_plot_commands():
"""
Get a sorted list of all of the plotting commands.
"""
# This works by searching for all functions in this module and
# removing a few hard-coded exclusions, as well as all of the
# colormap-setting functions, and anything marked as private with
# a preceding underscore.
import inspect
exclude = set(['colormaps', 'colors', 'connect', 'disconnect',
'get_plot_commands', 'get_current_fig_manager',
'ginput', 'plotting', 'waitforbuttonpress'])
exclude |= set(colormaps())
this_module = inspect.getmodule(get_plot_commands)
commands = set()
for name, obj in list(six.iteritems(globals())):
if name.startswith('_') or name in exclude:
continue
if inspect.isfunction(obj) and inspect.getmodule(obj) is this_module:
commands.add(name)
commands = list(commands)
commands.sort()
return commands
def colors():
"""
This is a do-nothing function to provide you with help on how
matplotlib handles colors.
Commands which take color arguments can use several formats to
specify the colors. For the basic built-in colors, you can use a
single letter
===== =======
Alias Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
For a greater range of colors, you have two options. You can
specify the color using an html hex string, as in::
color = '#eeefff'
or you can pass an R,G,B tuple, where each of R,G,B are in the
range [0,1].
You can also use any legal html name for a color, for example::
color = 'red'
color = 'burlywood'
color = 'chartreuse'
The example below creates a subplot with a dark
slate gray background::
subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
Here is an example that creates a pale turquoise title::
title('Is this the best color?', color='#afeeee')
"""
pass
def colormaps():
"""
Matplotlib provides a number of colormaps, and others can be added using
:func:`~matplotlib.cm.register_cmap`. This function documents the built-in
colormaps, and will also return a list of all registered colormaps if called.
You can set the colormap for an image, pcolor, scatter, etc,
using a keyword argument::
imshow(X, cmap=cm.hot)
or using the :func:`set_cmap` function::
imshow(X)
pyplot.set_cmap('hot')
pyplot.set_cmap('jet')
In interactive mode, :func:`set_cmap` will update the colormap post-hoc,
allowing you to see which one works best for your data.
All built-in colormaps can be reversed by appending ``_r``: For instance,
``gray_r`` is the reverse of ``gray``.
There are several common color schemes used in visualization:
Sequential schemes
for unipolar data that progresses from low to high
Diverging schemes
for bipolar data that emphasizes positive or negative deviations from a
central value
Cyclic schemes
meant for plotting values that wrap around at the
endpoints, such as phase angle, wind direction, or time of day
Qualitative schemes
for nominal data that has no inherent ordering, where color is used
only to distinguish categories
The base colormaps are derived from those of the same name provided
with Matlab:
========= =======================================================
Colormap Description
========= =======================================================
autumn sequential linearly-increasing shades of red-orange-yellow
bone sequential increasing black-white color map with
a tinge of blue, to emulate X-ray film
cool linearly-decreasing shades of cyan-magenta
copper sequential increasing shades of black-copper
flag repetitive red-white-blue-black pattern (not cyclic at
endpoints)
gray sequential linearly-increasing black-to-white
grayscale
hot sequential black-red-yellow-white, to emulate blackbody
radiation from an object at increasing temperatures
hsv cyclic red-yellow-green-cyan-blue-magenta-red, formed
by changing the hue component in the HSV color space
jet a spectral map with dark endpoints, blue-cyan-yellow-red;
based on a fluid-jet simulation by NCSA [#]_
pink sequential increasing pastel black-pink-white, meant
for sepia tone colorization of photographs
prism repetitive red-yellow-green-blue-purple-...-green pattern
(not cyclic at endpoints)
spring linearly-increasing shades of magenta-yellow
summer sequential linearly-increasing shades of green-yellow
winter linearly-increasing shades of blue-green
========= =======================================================
For the above list only, you can also set the colormap using the
corresponding pylab shortcut interface function, similar to Matlab::
imshow(X)
hot()
jet()
The next set of palettes are from the `Yorick scientific visualisation
package <http://yorick.sourceforge.net/index.php>`_, an evolution of
the GIST package, both by David H. Munro:
============ =======================================================
Colormap Description
============ =======================================================
gist_earth mapmaker's colors from dark blue deep ocean to green
lowlands to brown highlands to white mountains
gist_heat sequential increasing black-red-orange-white, to emulate
blackbody radiation from an iron bar as it grows hotter
gist_ncar pseudo-spectral black-blue-green-yellow-red-purple-white
colormap from National Center for Atmospheric
Research [#]_
gist_rainbow runs through the colors in spectral order from red to
violet at full saturation (like *hsv* but not cyclic)
gist_stern "Stern special" color table from Interactive Data
Language software
============ =======================================================
The following colormaps are based on the `ColorBrewer
<http://colorbrewer.org>`_ color specifications and designs developed by
Cynthia Brewer:
ColorBrewer Diverging (luminance is highest at the midpoint, and
decreases towards differently-colored endpoints):
======== ===================================
Colormap Description
======== ===================================
BrBG brown, white, blue-green
PiYG pink, white, yellow-green
PRGn purple, white, green
PuOr orange, white, purple
RdBu red, white, blue
RdGy red, white, gray
RdYlBu red, yellow, blue
RdYlGn red, yellow, green
Spectral red, orange, yellow, green, blue
======== ===================================
ColorBrewer Sequential (luminance decreases monotonically):
======== ====================================
Colormap Description
======== ====================================
Blues white to dark blue
BuGn white, light blue, dark green
BuPu white, light blue, dark purple
GnBu white, light green, dark blue
Greens white to dark green
Greys white to black (not linear)
Oranges white, orange, dark brown
OrRd white, orange, dark red
PuBu white, light purple, dark blue
PuBuGn white, light purple, dark green
PuRd white, light purple, dark red
Purples white to dark purple
RdPu white, pink, dark purple
Reds white to dark red
YlGn light yellow, dark green
YlGnBu light yellow, light green, dark blue
YlOrBr light yellow, orange, dark brown
YlOrRd light yellow, orange, dark red
======== ====================================
ColorBrewer Qualitative:
(For plotting nominal data, :class:`ListedColormap` should be used,
not :class:`LinearSegmentedColormap`. Different sets of colors are
recommended for different numbers of categories. These continuous
versions of the qualitative schemes may be removed or converted in the
future.)
* Accent
* Dark2
* Paired
* Pastel1
* Pastel2
* Set1
* Set2
* Set3
Other miscellaneous schemes:
============= =======================================================
Colormap Description
============= =======================================================
afmhot sequential black-orange-yellow-white blackbody
spectrum, commonly used in atomic force microscopy
brg blue-red-green
bwr diverging blue-white-red
coolwarm diverging blue-gray-red, meant to avoid issues with 3D
shading, color blindness, and ordering of colors [#]_
CMRmap "Default colormaps on color images often reproduce to
confusing grayscale images. The proposed colormap
maintains an aesthetically pleasing color image that
automatically reproduces to a monotonic grayscale with
discrete, quantifiable saturation levels." [#]_
cubehelix Unlike most other color schemes cubehelix was designed
by D.A. Green to be monotonically increasing in terms
of perceived brightness. Also, when printed on a black
and white postscript printer, the scheme results in a
greyscale with monotonically increasing brightness.
This color scheme is named cubehelix because the r,g,b
values produced can be visualised as a squashed helix
around the diagonal in the r,g,b color cube.
gnuplot gnuplot's traditional pm3d scheme
(black-blue-red-yellow)
gnuplot2 sequential color printable as gray
(black-blue-violet-yellow-white)
ocean green-blue-white
rainbow spectral purple-blue-green-yellow-orange-red colormap
with diverging luminance
seismic diverging blue-white-red
nipy_spectral black-purple-blue-green-yellow-red-white spectrum,
originally from the Neuroimaging in Python project
terrain mapmaker's colors, blue-green-yellow-brown-white,
originally from IGOR Pro
============= =======================================================
The following colormaps are redundant and may be removed in future
versions. It's recommended to use the names in the descriptions
instead, which produce identical output:
========= =======================================================
Colormap Description
========= =======================================================
gist_gray identical to *gray*
gist_yarg identical to *gray_r*
binary identical to *gray_r*
spectral identical to *nipy_spectral* [#]_
========= =======================================================
.. rubric:: Footnotes
.. [#] Rainbow colormaps, ``jet`` in particular, are considered a poor
choice for scientific visualization by many researchers: `Rainbow Color
Map (Still) Considered Harmful
<http://www.jwave.vt.edu/%7Erkriz/Projects/create_color_table/color_07.pdf>`_
.. [#] Resembles "BkBlAqGrYeOrReViWh200" from NCAR Command
Language. See `Color Table Gallery
<http://www.ncl.ucar.edu/Document/Graphics/color_table_gallery.shtml>`_
.. [#] See `Diverging Color Maps for Scientific Visualization
<http://www.cs.unm.edu/~kmorel/documents/ColorMaps/>`_ by Kenneth
Moreland.
.. [#] See `A Color Map for Effective Black-and-White Rendering of
Color-Scale Images
<http://www.mathworks.com/matlabcentral/fileexchange/2662-cmrmap-m>`_
by Carey Rappaport
.. [#] Changed to distinguish from ColorBrewer's *Spectral* map.
:func:`spectral` still works, but
``set_cmap('nipy_spectral')`` is recommended for clarity.
"""
return sorted(cm.cmap_d.keys())
def _setup_pyplot_info_docstrings():
"""
Generates the plotting docstring.
These must be done after the entire module is imported, so it is
called from the end of this module, which is generated by
boilerplate.py.
"""
# Generate the plotting docstring
import re
def pad(s, l):
"""Pad string *s* to length *l*."""
if l < len(s):
return s[:l]
return s + ' ' * (l - len(s))
commands = get_plot_commands()
first_sentence = re.compile(r"(?:\s*).+?\.(?:\s+|$)", flags=re.DOTALL)
# Collect the first sentence of the docstring for all of the
# plotting commands.
rows = []
max_name = 0
max_summary = 0
for name in commands:
doc = globals()[name].__doc__
summary = ''
if doc is not None:
match = first_sentence.match(doc)
if match is not None:
summary = match.group(0).strip().replace('\n', ' ')
name = '`%s`' % name
rows.append([name, summary])
max_name = max(max_name, len(name))
max_summary = max(max_summary, len(summary))
lines = []
sep = '=' * max_name + ' ' + '=' * max_summary
lines.append(sep)
lines.append(' '.join([pad("Function", max_name),
pad("Description", max_summary)]))
lines.append(sep)
for name, summary in rows:
lines.append(' '.join([pad(name, max_name),
pad(summary, max_summary)]))
lines.append(sep)
plotting.__doc__ = '\n'.join(lines)
## Plotting part 1: manually generated functions and wrappers ##
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if mappable is None:
raise RuntimeError('No mappable was found to use for colorbar '
'creation. First define a mappable such as '
'an image (with imshow) or a contour set ('
'with contourf).')
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw)
draw_if_interactive()
return ret
colorbar.__doc__ = matplotlib.colorbar.colorbar_doc
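# Hedged usage sketch (not part of the original module): attaching a colorbar
# to the current mappable and rescaling its color limits with clim(), defined
# just below.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   data = np.random.rand(10, 10)
#   im = plt.imshow(data, cmap='hot')
#   plt.colorbar(im)        # or plt.colorbar() to use the current image
#   plt.clim(0.0, 0.5)      # rescale the color limits of the current image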
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image.
To apply clim to all axes images do::
clim(0, 0.5)
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images,
use, for example::
for im in gca().get_images():
im.set_clim(0, 0.05)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, e.g., with imshow')
im.set_clim(vmin, vmax)
draw_if_interactive()
def set_cmap(cmap):
"""
Set the default colormap. Applies to the current image if any.
See help(colormaps) for more information.
*cmap* must be a :class:`~matplotlib.colors.Colormap` instance, or
the name of a registered colormap.
See :func:`matplotlib.cm.register_cmap` and
:func:`matplotlib.cm.get_cmap`.
"""
cmap = cm.get_cmap(cmap)
rc('image', cmap=cmap.name)
im = gci()
if im is not None:
im.set_cmap(cmap)
draw_if_interactive()
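# Hedged usage sketch (not part of the original module): switching the colormap
# of the current image after the fact with set_cmap(), as described above.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   plt.imshow(np.random.rand(20, 20))
#   plt.set_cmap('gray')    # updates the rc default and the current image
#   plt.set_cmap('hot')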
@docstring.copy_dedent(_imread)
def imread(*args, **kwargs):
return _imread(*args, **kwargs)
@docstring.copy_dedent(_imsave)
def imsave(*args, **kwargs):
return _imsave(*args, **kwargs)
def matshow(A, fignum=None, **kw):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
With the exception of *fignum*, keyword arguments are passed to
:func:`~matplotlib.pyplot.imshow`. You may set the *origin*
kwarg to "lower" if you want the first row in the array to be
at the bottom instead of the top.
*fignum*: [ None | integer | False ]
By default, :func:`matshow` creates a new figure window with
automatic numbering. If *fignum* is given as an integer, the
created figure will use this figure number. Because :func:`matshow`
tries to set the figure aspect ratio to match that of the array,
strange things may happen if you provide the number of an already
existing figure.
If *fignum* is *False* or 0, a new figure window will **NOT** be created.
"""
A = np.asanyarray(A)
if fignum is False or fignum is 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized figure
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kw)
sci(im)
draw_if_interactive()
return im
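# Hedged usage sketch (not part of the original module): displaying a matrix
# with matshow(), which opens a new, aspect-matched figure by default.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   m = np.diag(np.arange(1, 6))
#   plt.matshow(m)             # new figure, ticks on top, origin upper-left
#   plt.matshow(m, fignum=0)   # draw into the current axes instead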
def polar(*args, **kwargs):
"""
Make a polar plot.
call signature::
polar(theta, r, **kwargs)
Multiple *theta*, *r* arguments are supported, with format
strings, as in :func:`~matplotlib.pyplot.plot`.
"""
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
return ret
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
names=None, subplots=True, newfig=True, **kwargs):
"""
Plot the data in a file.
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots if *subplots* is *True*
(the default), or for lines in a single subplot if *subplots*
is *False*.
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, e.g., integer
column numbers in both or column names in both. If *subplots*
is *False*, then including any function such as 'semilogy'
that changes the axis scaling will set the scaling for all
columns.
*comments*, *skiprows*, *checkrows*, *delimiter*, and *names*
are all passed on to :func:`matplotlib.pylab.csv2rec` to
load the data into a record array.
If *newfig* is *True*, the plot always will be made in a new figure;
if *False*, it will be made in the current figure if one exists,
else in a new figure.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'),
plotfuncs={'volume': 'semilogy'})
Note: plotfile is intended as a convenience for quickly plotting
data from flat files; it is not intended as an alternative
interface to general plotting with pyplot or matplotlib.
"""
if newfig:
fig = figure()
else:
fig = gcf()
if len(cols)<1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = dict()
r = mlab.csv2rec(fname, comments=comments, skiprows=skiprows,
checkrows=checkrows, delimiter=delimiter, names=names)
def getname_val(identifier):
'return the name and column data for identifier'
if is_string_like(identifier):
return identifier, r[identifier]
elif is_numlike(identifier):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
ynamelist = []
if len(cols)==1:
ax1 = fig.add_subplot(1,1,1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_ylabel(xname)
else:
N = len(cols)
for i in range(1,N):
if subplots:
if i==1:
ax = ax1 = fig.add_subplot(N-1,1,i)
else:
ax = fig.add_subplot(N-1,1,i, sharex=ax1)
elif i==1:
ax = fig.add_subplot(1,1,1)
yname, y = getname_val(cols[i])
ynamelist.append(yname)
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
if subplots:
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if not subplots:
ax.legend(ynamelist, loc='best')
if xname=='date':
fig.autofmt_xdate()
draw_if_interactive()
def _autogen_docstring(base):
"""Autogenerated wrappers will get their docstring from a base function
with an addendum."""
msg = "\n\nAdditional kwargs: hold = [True|False] overrides default hold state"
addendum = docstring.Appender(msg, '\n\n')
return lambda func: addendum(docstring.copy_dedent(base)(func))
# This function cannot be generated by boilerplate.py because it may
# return an image or a line.
@_autogen_docstring(Axes.spy)
def spy(Z, precision=0, marker=None, markersize=None, aspect='equal', hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.spy(Z, precision, marker, markersize, aspect, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if isinstance(ret, cm.ScalarMappable):
sci(ret)
return ret
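# Hedged usage sketch (not part of the original module): the hold-override
# pattern shared by spy() and the autogenerated wrappers below. Passing
# hold=False clears the current axes before drawing, and the previous hold
# state is restored afterwards.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   plt.plot(range(10))
#   plt.spy(np.eye(5), hold=False)   # replaces the line plot instead of overlaying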
################# REMAINING CONTENT GENERATED BY boilerplate.py ##############
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.acorr)
def acorr(x, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.acorr(x, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.angle_spectrum)
def angle_spectrum(x, Fs=None, Fc=None, window=None, pad_to=None, sides=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.angle_spectrum(x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to,
sides=sides, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.arrow)
def arrow(x, y, dx, dy, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.arrow(x, y, dx, dy, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axhline)
def axhline(y=0, xmin=0, xmax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axhline(y=y, xmin=xmin, xmax=xmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axhspan)
def axhspan(ymin, ymax, xmin=0, xmax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axhspan(ymin, ymax, xmin=xmin, xmax=xmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axvline)
def axvline(x=0, ymin=0, ymax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axvline(x=x, ymin=ymin, ymax=ymax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.axvspan)
def axvspan(xmin, xmax, ymin=0, ymax=1, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.axvspan(xmin, xmax, ymin=ymin, ymax=ymax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.bar)
def bar(left, height, width=0.8, bottom=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.bar(left, height, width=width, bottom=bottom, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.barh)
def barh(bottom, width, height=0.8, left=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.barh(bottom, width, height=height, left=left, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.broken_barh)
def broken_barh(xranges, yrange, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.broken_barh(xranges, yrange, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.boxplot)
def boxplot(x, notch=False, sym=None, vert=True, whis=1.5, positions=None,
widths=None, patch_artist=False, bootstrap=None, usermedians=None,
conf_intervals=None, meanline=False, showmeans=False, showcaps=True,
showbox=True, showfliers=True, boxprops=None, labels=None,
flierprops=None, medianprops=None, meanprops=None, capprops=None,
whiskerprops=None, manage_xticks=True, hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.boxplot(x, notch=notch, sym=sym, vert=vert, whis=whis,
positions=positions, widths=widths,
patch_artist=patch_artist, bootstrap=bootstrap,
usermedians=usermedians,
conf_intervals=conf_intervals, meanline=meanline,
showmeans=showmeans, showcaps=showcaps,
showbox=showbox, showfliers=showfliers,
boxprops=boxprops, labels=labels,
flierprops=flierprops, medianprops=medianprops,
meanprops=meanprops, capprops=capprops,
whiskerprops=whiskerprops, manage_xticks=manage_xticks)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.cohere)
def cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default',
scale_by_freq=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.cohere(x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.clabel)
def clabel(CS, *args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.clabel(CS, *args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.contour)
def contour(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.contour(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.contourf)
def contourf(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.contourf(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.csd)
def csd(x, y, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
return_line=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.csd(x, y, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq,
return_line=return_line, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.errorbar)
def errorbar(x, y, yerr=None, xerr=None, fmt='', ecolor=None, elinewidth=None,
capsize=3, barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, errorevery=1, capthick=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.errorbar(x, y, yerr=yerr, xerr=xerr, fmt=fmt, ecolor=ecolor,
elinewidth=elinewidth, capsize=capsize,
barsabove=barsabove, lolims=lolims, uplims=uplims,
xlolims=xlolims, xuplims=xuplims,
errorevery=errorevery, capthick=capthick, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.eventplot)
def eventplot(positions, orientation='horizontal', lineoffsets=1, linelengths=1,
linewidths=None, colors=None, linestyles='solid', hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.eventplot(positions, orientation=orientation,
lineoffsets=lineoffsets, linelengths=linelengths,
linewidths=linewidths, colors=colors,
linestyles=linestyles, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.fill)
def fill(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.fill(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.fill_between)
def fill_between(x, y1, y2=0, where=None, interpolate=False, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.fill_between(x, y1, y2=y2, where=where,
interpolate=interpolate, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.fill_betweenx)
def fill_betweenx(y, x1, x2=0, where=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.fill_betweenx(y, x1, x2=x2, where=where, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hexbin)
def hexbin(x, y, C=None, gridsize=100, bins=None, xscale='linear',
yscale='linear', extent=None, cmap=None, norm=None, vmin=None,
vmax=None, alpha=None, linewidths=None, edgecolors='none',
reduce_C_function=np.mean, mincnt=None, marginals=False, hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hexbin(x, y, C=C, gridsize=gridsize, bins=bins, xscale=xscale,
yscale=yscale, extent=extent, cmap=cmap, norm=norm,
vmin=vmin, vmax=vmax, alpha=alpha,
linewidths=linewidths, edgecolors=edgecolors,
reduce_C_function=reduce_C_function, mincnt=mincnt,
marginals=marginals, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hist)
def hist(x, bins=10, range=None, normed=False, weights=None, cumulative=False,
bottom=None, histtype='bar', align='mid', orientation='vertical',
rwidth=None, log=False, color=None, label=None, stacked=False,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hist(x, bins=bins, range=range, normed=normed,
weights=weights, cumulative=cumulative, bottom=bottom,
histtype=histtype, align=align, orientation=orientation,
rwidth=rwidth, log=log, color=color, label=label,
stacked=stacked, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hist2d)
def hist2d(x, y, bins=10, range=None, normed=False, weights=None, cmin=None,
cmax=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hist2d(x, y, bins=bins, range=range, normed=normed,
weights=weights, cmin=cmin, cmax=cmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret[-1])
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.hlines)
def hlines(y, xmin, xmax, colors='k', linestyles='solid', label='', hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.hlines(y, xmin, xmax, colors=colors, linestyles=linestyles,
label=label, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.imshow)
def imshow(X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=None,
vmin=None, vmax=None, origin=None, extent=None, shape=None,
filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.imshow(X, cmap=cmap, norm=norm, aspect=aspect,
interpolation=interpolation, alpha=alpha, vmin=vmin,
vmax=vmax, origin=origin, extent=extent, shape=shape,
filternorm=filternorm, filterrad=filterrad,
imlim=imlim, resample=resample, url=url, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.loglog)
def loglog(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.loglog(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.magnitude_spectrum)
def magnitude_spectrum(x, Fs=None, Fc=None, window=None, pad_to=None,
sides=None, scale=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.magnitude_spectrum(x, Fs=Fs, Fc=Fc, window=window,
pad_to=pad_to, sides=sides, scale=scale,
**kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.pcolor)
def pcolor(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.pcolor(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.pcolormesh)
def pcolormesh(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.pcolormesh(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.phase_spectrum)
def phase_spectrum(x, Fs=None, Fc=None, window=None, pad_to=None, sides=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.phase_spectrum(x, Fs=Fs, Fc=Fc, window=window, pad_to=pad_to,
sides=sides, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.pie)
def pie(x, explode=None, labels=None, colors=None, autopct=None,
pctdistance=0.6, shadow=False, labeldistance=1.1, startangle=None,
radius=None, counterclock=True, wedgeprops=None, textprops=None,
hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.pie(x, explode=explode, labels=labels, colors=colors,
autopct=autopct, pctdistance=pctdistance, shadow=shadow,
labeldistance=labeldistance, startangle=startangle,
radius=radius, counterclock=counterclock,
wedgeprops=wedgeprops, textprops=textprops)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.plot)
def plot(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.plot_date)
def plot_date(x, y, fmt='o', tz=None, xdate=True, ydate=False, hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.plot_date(x, y, fmt=fmt, tz=tz, xdate=xdate, ydate=ydate,
**kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.psd)
def psd(x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
return_line=None, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.psd(x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq,
return_line=return_line, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.quiver)
def quiver(*args, **kw):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kw.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.quiver(*args, **kw)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.quiverkey)
def quiverkey(*args, **kw):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kw.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.quiverkey(*args, **kw)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.scatter)
def scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None, vmin=None,
vmax=None, alpha=None, linewidths=None, verts=None, hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.scatter(x, y, s=s, c=c, marker=marker, cmap=cmap, norm=norm,
vmin=vmin, vmax=vmax, alpha=alpha,
linewidths=linewidths, verts=verts, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.semilogx)
def semilogx(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.semilogx(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.semilogy)
def semilogy(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.semilogy(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.specgram)
def specgram(x, NFFT=None, Fs=None, Fc=None, detrend=None, window=None,
noverlap=None, cmap=None, xextent=None, pad_to=None, sides=None,
scale_by_freq=None, mode=None, scale=None, vmin=None, vmax=None,
hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.specgram(x, NFFT=NFFT, Fs=Fs, Fc=Fc, detrend=detrend,
window=window, noverlap=noverlap, cmap=cmap,
xextent=xextent, pad_to=pad_to, sides=sides,
scale_by_freq=scale_by_freq, mode=mode, scale=scale,
vmin=vmin, vmax=vmax, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret[-1])
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.stackplot)
def stackplot(x, *args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.stackplot(x, *args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.stem)
def stem(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.stem(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.step)
def step(x, y, *args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.step(x, y, *args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.streamplot)
def streamplot(x, y, u, v, density=1, linewidth=None, color=None, cmap=None,
norm=None, arrowsize=1, arrowstyle='-|>', minlength=0.1,
transform=None, zorder=1, hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.streamplot(x, y, u, v, density=density, linewidth=linewidth,
color=color, cmap=cmap, norm=norm,
arrowsize=arrowsize, arrowstyle=arrowstyle,
minlength=minlength, transform=transform,
zorder=zorder)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret.lines)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.tricontour)
def tricontour(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.tricontour(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.tricontourf)
def tricontourf(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.tricontourf(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
if ret._A is not None: sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.tripcolor)
def tripcolor(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.tripcolor(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
sci(ret)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.triplot)
def triplot(*args, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kwargs.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.triplot(*args, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.violinplot)
def violinplot(dataset, positions=None, vert=True, widths=0.5, showmeans=False,
showextrema=True, showmedians=False, points=100, bw_method=None,
hold=None):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.violinplot(dataset, positions=positions, vert=vert,
widths=widths, showmeans=showmeans,
showextrema=showextrema, showmedians=showmedians,
points=points, bw_method=bw_method)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.vlines)
def vlines(x, ymin, ymax, colors='k', linestyles='solid', label='', hold=None,
**kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.vlines(x, ymin, ymax, colors=colors, linestyles=linestyles,
label=label, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.xcorr)
def xcorr(x, y, normed=True, detrend=mlab.detrend_none, usevlines=True,
maxlags=10, hold=None, **kwargs):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
if hold is not None:
ax.hold(hold)
try:
ret = ax.xcorr(x, y, normed=normed, detrend=detrend,
usevlines=usevlines, maxlags=maxlags, **kwargs)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@_autogen_docstring(Axes.barbs)
def barbs(*args, **kw):
ax = gca()
# allow callers to override the hold state by passing hold=True|False
washold = ax.ishold()
hold = kw.pop('hold', None)
if hold is not None:
ax.hold(hold)
try:
ret = ax.barbs(*args, **kw)
draw_if_interactive()
finally:
ax.hold(washold)
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.cla)
def cla():
ret = gca().cla()
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.grid)
def grid(b=None, which='major', axis='both', **kwargs):
ret = gca().grid(b=b, which=which, axis=axis, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.legend)
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.table)
def table(**kwargs):
ret = gca().table(**kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.text)
def text(x, y, s, fontdict=None, withdash=False, **kwargs):
ret = gca().text(x, y, s, fontdict=fontdict, withdash=withdash, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.annotate)
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.ticklabel_format)
def ticklabel_format(**kwargs):
ret = gca().ticklabel_format(**kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.locator_params)
def locator_params(axis='both', tight=None, **kwargs):
ret = gca().locator_params(axis=axis, tight=tight, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.tick_params)
def tick_params(axis='both', **kwargs):
ret = gca().tick_params(axis=axis, **kwargs)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.margins)
def margins(*args, **kw):
ret = gca().margins(*args, **kw)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
@docstring.copy_dedent(Axes.autoscale)
def autoscale(enable=True, axis='both', tight=None):
ret = gca().autoscale(enable=enable, axis=axis, tight=tight)
draw_if_interactive()
return ret
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
_setup_pyplot_info_docstrings()
| mit |
hagabbar/pycbc_copy | pycbc/results/scatter_histograms.py | 1 | 28572 | # Copyright (C) 2016 Miriam Cabero Mueller, Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
Module to generate figures with scatter plots and histograms.
"""
import numpy
import scipy.stats
import itertools
import matplotlib
# Only if a backend is not already set ... This should really *not* be done
# here, but in the executables you should set matplotlib.use()
# This matches the check that matplotlib does internally, but this *may* be
# version dependent. If this is a problem then remove this and control from
# the executables directly.
import sys
if not 'matplotlib.backends' in sys.modules:
matplotlib.use('agg')
from matplotlib import offsetbox
from matplotlib import pyplot
import matplotlib.gridspec as gridspec
from pycbc.results import str_utils
from pycbc.io import FieldArray
def create_axes_grid(parameters, labels=None, height_ratios=None,
width_ratios=None, no_diagonals=False):
"""Given a list of parameters, creates a figure with an axis for
every possible combination of the parameters.
Parameters
----------
parameters : list
Names of the variables to be plotted.
labels : {None, dict}, optional
A dictionary of parameters -> parameter labels.
height_ratios : {None, list}, optional
Set the height ratios of the axes; see `matplotlib.gridspec.GridSpec`
for details.
width_ratios : {None, list}, optional
Set the width ratios of the axes; see `matplotlib.gridspec.GridSpec`
for details.
no_diagonals : {False, bool}, optional
Do not produce axes for the same parameter on both axes.
Returns
-------
fig : pyplot.figure
The figure that was created.
axis_dict : dict
A dictionary mapping the parameter combinations to the axis and their
location in the subplots grid; i.e., the key, values are:
`{('param1', 'param2'): (pyplot.axes, row index, column index)}`
"""
if labels is None:
labels = {p: p for p in parameters}
elif any(p not in labels for p in parameters):
raise ValueError("labels must be provided for all parameters")
# Create figure with adequate size for number of parameters.
ndim = len(parameters)
if no_diagonals:
ndim -= 1
if ndim < 3:
fsize = (8, 7)
else:
fsize = (ndim*3 - 1, ndim*3 - 2)
fig = pyplot.figure(figsize=fsize)
# create the axis grid
gs = gridspec.GridSpec(ndim, ndim, width_ratios=width_ratios,
height_ratios=height_ratios, wspace=0.05, hspace=0.05)
# create grid of axis numbers to easily create axes in the right locations
axes = numpy.arange(ndim**2).reshape((ndim, ndim))
# Select possible combinations of plots and establish rows and columns.
combos = list(itertools.combinations(parameters, 2))
# add the diagonals
if not no_diagonals:
combos += [(p, p) for p in parameters]
# create the mapping between parameter combos and axes
axis_dict = {}
    # cycle over all the axes, setting things as needed
for nrow in range(ndim):
for ncolumn in range(ndim):
ax = pyplot.subplot(gs[axes[nrow, ncolumn]])
# map to a parameter index
px = parameters[ncolumn]
if no_diagonals:
py = parameters[nrow+1]
else:
py = parameters[nrow]
if (px, py) in combos:
axis_dict[px, py] = (ax, nrow, ncolumn)
# x labels only on bottom
if nrow + 1 == ndim:
ax.set_xlabel('{}'.format(labels[px]), fontsize=18)
else:
pyplot.setp(ax.get_xticklabels(), visible=False)
# y labels only on left
if ncolumn == 0:
ax.set_ylabel('{}'.format(labels[py]), fontsize=18)
else:
pyplot.setp(ax.get_yticklabels(), visible=False)
else:
# make non-used axes invisible
ax.axis('off')
    return fig, axis_dict
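# Illustrative usage sketch: the parameter names 'mass1' and 'mass2' below are
# assumptions for demonstration; any list of sample-field names works.
def _example_create_axes_grid():
    fig, axis_dict = create_axes_grid(['mass1', 'mass2'])
    # keys are ordered as (x-parameter, y-parameter)
    ax, nrow, ncolumn = axis_dict['mass1', 'mass2']
    return fig, ax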
def get_scale_fac(fig, fiducial_width=8, fiducial_height=7):
"""Gets a factor to scale fonts by for the given figure. The scale
factor is relative to a figure with dimensions
(`fiducial_width`, `fiducial_height`).
"""
width, height = fig.get_size_inches()
return (width*height/(fiducial_width*fiducial_height))**0.5
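# Illustrative sketch: scale a baseline font size with the figure dimensions;
# the figsize used here is an arbitrary assumption.
def _example_get_scale_fac():
    fig = pyplot.figure(figsize=(16, 14))
    # (16*14 / (8*7))**0.5 == 2, so a 12pt font becomes 24pt
    return 12 * get_scale_fac(fig)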
def construct_kde(samples_array, use_kombine=False):
"""Constructs a KDE from the given samples.
"""
if use_kombine:
try:
import kombine
except ImportError:
raise ImportError("kombine is not installed.")
# construct the kde
if use_kombine:
kde = kombine.clustered_kde.KDE(samples_array)
else:
kde = scipy.stats.gaussian_kde(samples_array.T)
return kde
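# Illustrative sketch: build a KDE from synthetic two-dimensional samples and
# evaluate the density at the sample points themselves.
def _example_construct_kde():
    samples = numpy.random.normal(size=(500, 2))
    kde = construct_kde(samples)  # scipy.stats.gaussian_kde by default
    # gaussian_kde expects evaluation points with shape (ndim, npoints)
    return kde(samples.T)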
def create_density_plot(xparam, yparam, samples, plot_density=True,
plot_contours=True, percentiles=None, cmap='viridis',
contour_color=None, xmin=None, xmax=None, ymin=None, ymax=None,
exclude_region=None, fig=None, ax=None, use_kombine=False):
"""Computes and plots posterior density and confidence intervals using the
given samples.
Parameters
----------
xparam : string
The parameter to plot on the x-axis.
yparam : string
The parameter to plot on the y-axis.
samples : dict, numpy structured array, or FieldArray
The samples to plot.
plot_density : {True, bool}
Plot a color map of the density.
plot_contours : {True, bool}
Plot contours showing the n-th percentiles of the density.
percentiles : {None, float or array}
What percentile contours to draw. If None, will plot the 50th
and 90th percentiles.
cmap : {'viridis', string}
The name of the colormap to use for the density plot.
contour_color : {None, string}
What color to make the contours. Default is white for density
plots and black for other plots.
xmin : {None, float}
Minimum value to plot on x-axis.
xmax : {None, float}
Maximum value to plot on x-axis.
ymin : {None, float}
Minimum value to plot on y-axis.
ymax : {None, float}
Maximum value to plot on y-axis.
    exclude_region : {None, str}
Exclude the specified region when plotting the density or contours.
Must be a string in terms of `xparam` and `yparam` that is
understandable by numpy's logical evaluation. For example, if
`xparam = m_1` and `yparam = m_2`, and you want to exclude the region
for which `m_2` is greater than `m_1`, then exclude region should be
`'m_2 > m_1'`.
fig : {None, pyplot.figure}
Add the plot to the given figure. If None and ax is None, will create
a new figure.
ax : {None, pyplot.axes}
Draw plot on the given axis. If None, will create a new axis from
`fig`.
use_kombine : {False, bool}
Use kombine's KDE to calculate density. Otherwise, will use
`scipy.stats.gaussian_kde.` Default is False.
Returns
-------
fig : pyplot.figure
The figure the plot was made on.
ax : pyplot.axes
The axes the plot was drawn on.
"""
if percentiles is None:
percentiles = numpy.array([50., 90.])
percentiles = 100. - numpy.array(percentiles)
percentiles.sort()
if ax is None and fig is None:
fig = pyplot.figure()
if ax is None:
ax = fig.add_subplot(111)
# convert samples to array and construct kde
xsamples = samples[xparam]
ysamples = samples[yparam]
arr = numpy.vstack((xsamples, ysamples)).T
kde = construct_kde(arr, use_kombine=use_kombine)
# construct grid to evaluate on
if xmin is None:
xmin = xsamples.min()
if xmax is None:
xmax = xsamples.max()
if ymin is None:
ymin = ysamples.min()
if ymax is None:
ymax = ysamples.max()
npts = 100
X, Y = numpy.mgrid[xmin:xmax:complex(0,npts), ymin:ymax:complex(0,npts)] # pylint:disable=invalid-slice-index
pos = numpy.vstack([X.ravel(), Y.ravel()])
if use_kombine:
Z = numpy.exp(kde(pos.T).reshape(X.shape))
draw = kde.draw
else:
Z = kde(pos).T.reshape(X.shape)
draw = kde.resample
if exclude_region is not None:
# convert X,Y to a single FieldArray so we can use it's ability to
# evaluate strings
farr = FieldArray.from_kwargs(**{xparam: X, yparam: Y})
Z[farr[exclude_region]] = 0.
if plot_density:
ax.imshow(numpy.rot90(Z), extent=[xmin, xmax, ymin, ymax],
aspect='auto', cmap=cmap, zorder=1)
if contour_color is None:
contour_color = 'w'
if plot_contours:
# compute the percentile values
resamps = kde(draw(int(npts**2)))
if use_kombine:
resamps = numpy.exp(resamps)
s = numpy.percentile(resamps, percentiles)
if contour_color is None:
contour_color = 'k'
# make linewidths thicker if not plotting density for clarity
if plot_density:
lw = 1
else:
lw = 2
ct = ax.contour(X, Y, Z, s, colors=contour_color, linewidths=lw,
zorder=3)
# label contours
lbls = ['{p}%'.format(p=int(p)) for p in (100. - percentiles)]
fmt = dict(zip(ct.levels, lbls))
fs = 12
ax.clabel(ct, ct.levels, inline=True, fmt=fmt, fontsize=fs)
return fig, ax
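# Illustrative sketch: density map plus 50th/90th percentile contours for two
# hypothetical parameters 'x' and 'y' stored in a plain dict of arrays.
def _example_create_density_plot():
    samples = {'x': numpy.random.normal(size=1000),
               'y': numpy.random.normal(size=1000)}
    fig, ax = create_density_plot('x', 'y', samples, plot_density=True,
                                  plot_contours=True)
    return fig, ax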
def create_marginalized_hist(ax, values, label, percentiles=None,
color='k', fillcolor='gray', linecolor='navy',
title=True, expected_value=None, expected_color='red',
rotated=False, plot_min=None, plot_max=None):
"""Plots a 1D marginalized histogram of the given param from the given
samples.
Parameters
----------
ax : pyplot.Axes
The axes on which to draw the plot.
values : array
The parameter values to plot.
label : str
A label to use for the title.
percentiles : {None, float or array}
What percentiles to draw lines at. If None, will draw lines at
`[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and the
median).
color : {'k', string}
What color to make the histogram; default is black.
fillcolor : {'gray', string, or None}
What color to fill the histogram with. Set to None to not fill the
histogram. Default is 'gray'.
linecolor : {'navy', string}
What color to use for the percentile lines. Default is 'navy'.
title : {True, bool}
Add a title with the median value +/- uncertainty, with the
max(min) `percentile` used for the +(-) uncertainty.
rotated : {False, bool}
Plot the histogram on the y-axis instead of the x. Default is False.
plot_min : {None, float}
The minimum value to plot. If None, will default to whatever `pyplot`
creates.
plot_max : {None, float}
The maximum value to plot. If None, will default to whatever `pyplot`
creates.
    expected_value : {None, float}
        If provided, mark this value with a line (vertical if `rotated` is
        False, horizontal otherwise).
    expected_color : {'red', string}
        What color to use for the expected value line. Default is 'red'.
"""
if fillcolor is None:
htype = 'step'
else:
htype = 'stepfilled'
if rotated:
orientation = 'horizontal'
else:
orientation = 'vertical'
ax.hist(values, bins=50, histtype=htype, orientation=orientation,
facecolor=fillcolor, edgecolor=color, lw=2, normed=True)
if percentiles is None:
percentiles = [5., 50., 95.]
values = numpy.percentile(values, percentiles)
for val in values:
if rotated:
ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3)
else:
ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3)
# plot expected
if expected_value is not None:
if rotated:
ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2)
else:
ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2)
if title:
values_med = numpy.median(values)
values_min = values.min()
values_max = values.max()
negerror = values_med - values_min
poserror = values_max - values_med
fmt = '$' + str_utils.format_value(values_med, negerror,
plus_error=poserror, ndecs=2) + '$'
if rotated:
ax.yaxis.set_label_position("right")
# sets colored title for marginal histogram
set_marginal_histogram_title(ax, fmt, color,
label=label, rotated=rotated)
# Remove x-ticks
ax.set_xticks([])
# turn off x-labels
ax.set_xlabel('')
# set limits
ymin, ymax = ax.get_ylim()
if plot_min is not None:
ymin = plot_min
if plot_max is not None:
ymax = plot_max
ax.set_ylim(ymin, ymax)
else:
# sets colored title for marginal histogram
set_marginal_histogram_title(ax, fmt, color, label=label)
# Remove y-ticks
ax.set_yticks([])
# turn off y-label
ax.set_ylabel('')
# set limits
xmin, xmax = ax.get_xlim()
if plot_min is not None:
xmin = plot_min
if plot_max is not None:
xmax = plot_max
ax.set_xlim(xmin, xmax)
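# Illustrative sketch: a single marginalized histogram on a fresh axis for a
# hypothetical parameter 'x', using the default percentile lines and title.
def _example_create_marginalized_hist():
    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    create_marginalized_hist(ax, numpy.random.normal(size=1000), label='x')
    return fig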
def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False):
""" Sets the title of the marginal histograms.
Parameters
----------
ax : Axes
The `Axes` instance for the plot.
fmt : str
The string to add to the title.
color : str
The color of the text to add to the title.
label : str
        If a title does not already exist, include `label` at the beginning of the string.
rotated : bool
If `True` then rotate the text 270 degrees for sideways title.
"""
# get rotation angle of the title
rotation = 270 if rotated else 0
# get how much to displace title on axes
xscale = 1.05 if rotated else 0.0
if rotated:
yscale = 1.0
elif len(ax.get_figure().axes) > 1:
yscale = 1.15
else:
yscale = 1.05
    # get the class that packs text boxes vertically or horizontally
packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker
# if no title exists
if not hasattr(ax, "title_boxes"):
# create a text box
title = "{} = {}".format(label, fmt)
tbox1 = offsetbox.TextArea(
title,
textprops=dict(color=color, size=15, rotation=rotation,
ha='left', va='bottom'))
# save a list of text boxes as attribute for later
ax.title_boxes = [tbox1]
# pack text boxes
ybox = packer_class(children=ax.title_boxes,
align="bottom", pad=0, sep=5)
    # otherwise, append to the existing title
else:
# delete old title
ax.title_anchor.remove()
# add new text box to list
tbox1 = offsetbox.TextArea(
" {}".format(fmt),
textprops=dict(color=color, size=15, rotation=rotation,
ha='left', va='bottom'))
ax.title_boxes = ax.title_boxes + [tbox1]
# pack text boxes
ybox = packer_class(children=ax.title_boxes,
align="bottom", pad=0, sep=5)
# add new title and keep reference to instance as an attribute
anchored_ybox = offsetbox.AnchoredOffsetbox(
loc=2, child=ybox, pad=0.,
frameon=False, bbox_to_anchor=(xscale, yscale),
bbox_transform=ax.transAxes, borderpad=0.)
ax.title_anchor = ax.add_artist(anchored_ybox)
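# Illustrative sketch: attach a colored summary title to an axis, mirroring
# how the diagonal panels are labelled; the value string is made up.
def _example_set_marginal_histogram_title():
    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    set_marginal_histogram_title(ax, '$1.23^{+0.10}_{-0.08}$', 'k', label='x')
    return fig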
def create_multidim_plot(parameters, samples, labels=None,
mins=None, maxs=None, expected_parameters=None,
expected_parameters_color='r',
plot_marginal=True, plot_scatter=True,
marginal_percentiles=None, contour_percentiles=None,
zvals=None, show_colorbar=True, cbar_label=None,
vmin=None, vmax=None, scatter_cmap='plasma',
plot_density=False, plot_contours=True,
density_cmap='viridis',
contour_color=None, hist_color='black',
line_color=None, fill_color='gray',
use_kombine=False, fig=None, axis_dict=None):
"""Generate a figure with several plots and histograms.
Parameters
----------
parameters: list
Names of the variables to be plotted.
samples : FieldArray
A field array of the samples to plot.
labels: {None, list}, optional
A list of names for the parameters.
mins : {None, dict}, optional
Minimum value for the axis of each variable in `parameters`.
If None, it will use the minimum of the corresponding variable in
`samples`.
maxs : {None, dict}, optional
Maximum value for the axis of each variable in `parameters`.
If None, it will use the maximum of the corresponding variable in
`samples`.
expected_parameters : {None, dict}, optional
Expected values of `parameters`, as a dictionary mapping parameter
names -> values. A cross will be plotted at the location of the
expected parameters on axes that plot any of the expected parameters.
expected_parameters_color : {'r', string}, optional
What color to make the expected parameters cross.
plot_marginal : {True, bool}
Plot the marginalized distribution on the diagonals. If False, the
diagonal axes will be turned off.
plot_scatter : {True, bool}
Plot each sample point as a scatter plot.
marginal_percentiles : {None, array}
What percentiles to draw lines at on the 1D histograms.
If None, will draw lines at `[5, 50, 95]` (i.e., the bounds on the
upper 90th percentile and the median).
contour_percentiles : {None, array}
What percentile contours to draw on the scatter plots. If None,
will plot the 50th and 90th percentiles.
zvals : {None, array}
An array to use for coloring the scatter plots. If None, scatter points
will be the same color.
show_colorbar : {True, bool}
Show the colorbar of zvalues used for the scatter points. A ValueError
will be raised if zvals is None and this is True.
cbar_label : {None, str}
Specify a label to add to the colorbar.
vmin: {None, float}, optional
Minimum value for the colorbar. If None, will use the minimum of zvals.
vmax: {None, float}, optional
        Maximum value for the colorbar. If None, will use the maximum of
zvals.
scatter_cmap : {'plasma', string}
The color map to use for the scatter points. Default is 'plasma'.
plot_density : {False, bool}
Plot the density of points as a color map.
plot_contours : {True, bool}
Draw contours showing the 50th and 90th percentile confidence regions.
density_cmap : {'viridis', string}
The color map to use for the density plot.
contour_color : {None, string}
The color to use for the contour lines. Defaults to white for
density plots, navy for scatter plots without zvals, and black
otherwise.
use_kombine : {False, bool}
Use kombine's KDE to calculate density. Otherwise, will use
`scipy.stats.gaussian_kde.` Default is False.
Returns
-------
fig : pyplot.figure
The figure that was created.
axis_dict : dict
A dictionary mapping the parameter combinations to the axis and their
location in the subplots grid; i.e., the key, values are:
`{('param1', 'param2'): (pyplot.axes, row index, column index)}`
"""
if labels is None:
labels = [p for p in parameters]
# turn labels into a dict for easier access
labels = dict(zip(parameters, labels))
# set up the figure with a grid of axes
# if only plotting 2 parameters, make the marginal plots smaller
nparams = len(parameters)
if nparams == 2:
width_ratios = [3,1]
height_ratios = [1,3]
else:
width_ratios = height_ratios = None
# only plot scatter if more than one parameter
plot_scatter = plot_scatter and nparams > 1
# Sort zvals to get higher values on top in scatter plots
if plot_scatter:
if zvals is not None:
sort_indices = zvals.argsort()
zvals = zvals[sort_indices]
samples = samples[sort_indices]
if contour_color is None:
contour_color = 'k'
elif show_colorbar:
raise ValueError("must provide z values to create a colorbar")
else:
# just make all scatter points same color
zvals = 'gray'
if plot_contours and contour_color is None:
contour_color = 'navy'
# convert samples to a dictionary to avoid re-computing derived parameters
# every time they are needed
samples = dict([[p, samples[p]] for p in parameters])
# values for axis bounds
if mins is None:
mins = {p:samples[p].min() for p in parameters}
else:
# copy the dict
mins = {p:val for p,val in mins.items()}
if maxs is None:
maxs = {p:samples[p].max() for p in parameters}
else:
# copy the dict
maxs = {p:val for p,val in maxs.items()}
# remove common offsets
for pi,param in enumerate(parameters):
values, offset = remove_common_offset(samples[param])
if offset != 0:
# we'll add the offset removed to the label
labels[param] = '{} - {:d}'.format(labels[param], offset)
samples[param] = values
mins[param] = mins[param] - float(offset)
maxs[param] = maxs[param] - float(offset)
# create the axis grid
if fig is None and axis_dict is None:
fig, axis_dict = create_axes_grid(
parameters, labels=labels,
width_ratios=width_ratios, height_ratios=height_ratios,
no_diagonals=not plot_marginal)
# Diagonals...
if plot_marginal:
for pi,param in enumerate(parameters):
ax, _, _ = axis_dict[param, param]
# if only plotting 2 parameters and on the second parameter,
# rotate the marginal plot
rotated = nparams == 2 and pi == nparams-1
# see if there are expected values
if expected_parameters is not None:
try:
expected_value = expected_parameters[param]
except KeyError:
expected_value = None
else:
expected_value = None
create_marginalized_hist(ax, samples[param], label=labels[param],
color=hist_color, fillcolor=fill_color, linecolor=line_color,
title=True, expected_value=expected_value,
expected_color=expected_parameters_color,
rotated=rotated, plot_min=mins[param], plot_max=maxs[param],
percentiles=marginal_percentiles)
# Off-diagonals...
for px, py in axis_dict:
if px == py:
continue
ax, _, _ = axis_dict[px, py]
if plot_scatter:
if plot_density:
alpha = 0.3
else:
alpha = 1.
plt = ax.scatter(x=samples[px], y=samples[py], c=zvals, s=5,
edgecolors='none', vmin=vmin, vmax=vmax,
cmap=scatter_cmap, alpha=alpha, zorder=2)
if plot_contours or plot_density:
# Exclude out-of-bound regions
# this is a bit kludgy; should probably figure out a better
# solution to eventually allow for more than just m_p m_s
if (px == 'm_p' and py == 'm_s') or (py == 'm_p' and px == 'm_s'):
exclude_region = 'm_s > m_p'
else:
exclude_region = None
create_density_plot(px, py, samples, plot_density=plot_density,
plot_contours=plot_contours, cmap=density_cmap,
percentiles=contour_percentiles,
contour_color=contour_color, xmin=mins[px], xmax=maxs[px],
ymin=mins[py], ymax=maxs[py],
exclude_region=exclude_region, ax=ax,
use_kombine=use_kombine)
if expected_parameters is not None:
try:
ax.axvline(expected_parameters[px], lw=1.5,
color=expected_parameters_color, zorder=5)
except KeyError:
pass
try:
ax.axhline(expected_parameters[py], lw=1.5,
color=expected_parameters_color, zorder=5)
except KeyError:
pass
ax.set_xlim(mins[px], maxs[px])
ax.set_ylim(mins[py], maxs[py])
# adjust tick number for large number of plots
if len(parameters) > 3:
for px, py in axis_dict:
ax, _, _ = axis_dict[px, py]
ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3))
ax.set_yticks(reduce_ticks(ax, 'y', maxticks=3))
if plot_scatter and show_colorbar:
# compute font size based on fig size
scale_fac = get_scale_fac(fig)
fig.subplots_adjust(right=0.85, wspace=0.03)
cbar_ax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
cb = fig.colorbar(plt, cax=cbar_ax)
if cbar_label is not None:
cb.set_label(cbar_label, fontsize=12*scale_fac)
cb.ax.tick_params(labelsize=8*scale_fac)
return fig, axis_dict
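# Illustrative sketch: a corner-style plot for two hypothetical parameters
# packed into a FieldArray; the colorbar is disabled because no z-values are
# supplied here.
def _example_create_multidim_plot():
    samples = FieldArray.from_kwargs(x=numpy.random.normal(size=2000),
                                     y=numpy.random.normal(size=2000))
    fig, axis_dict = create_multidim_plot(['x', 'y'], samples,
                                          show_colorbar=False)
    return fig, axis_dict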
def remove_common_offset(arr):
"""Given an array of data, removes a common offset > 1000, returning the
removed value.
"""
offset = 0
isneg = (arr <= 0).all()
# make sure all values have the same sign
if isneg or (arr >= 0).all():
# only remove offset if the minimum and maximum values are the same
        # order of magnitude and > O(1000)
minpwr = numpy.log10(abs(arr).min())
maxpwr = numpy.log10(abs(arr).max())
if numpy.floor(minpwr) == numpy.floor(maxpwr) and minpwr > 3:
offset = numpy.floor(10**minpwr)
if isneg:
offset *= -1
arr = arr - offset
return arr, int(offset)
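# Illustrative sketch: a GPS-time-like array (values are made up) has its
# large common offset stripped, which keeps tick labels readable.
def _example_remove_common_offset():
    times = numpy.array([1126259462.1, 1126259462.3, 1126259462.4])
    shifted, offset = remove_common_offset(times)
    # shifted is roughly [0.1, 0.3, 0.4]; offset is roughly 1126259462
    return shifted, offset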
def reduce_ticks(ax, which, maxticks=3):
"""Given a pyplot axis, resamples its `which`-axis ticks such that are at most
`maxticks` left.
Parameters
----------
ax : axis
The axis to adjust.
which : {'x' | 'y'}
Which axis to adjust.
maxticks : {3, int}
Maximum number of ticks to use.
Returns
-------
array
An array of the selected ticks.
"""
ticks = getattr(ax, 'get_{}ticks'.format(which))()
if len(ticks) > maxticks:
# make sure the left/right value is not at the edge
minax, maxax = getattr(ax, 'get_{}lim'.format(which))()
dw = abs(maxax-minax)/10.
start_idx, end_idx = 0, len(ticks)
if ticks[0] < minax + dw:
start_idx += 1
if ticks[-1] > maxax - dw:
end_idx -= 1
# get reduction factor
fac = int(len(ticks) / maxticks)
ticks = ticks[start_idx:end_idx:fac]
return ticks
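# Illustrative sketch: thin the x-axis ticks of a crowded axis down to at
# most three and re-apply them; the plotted data are synthetic.
def _example_reduce_ticks():
    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    ax.plot(numpy.linspace(0, 1, 100), numpy.random.normal(size=100))
    ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3))
    return fig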
| gpl-3.0 |
mjudsp/Tsallis | examples/plot_kernel_ridge_regression.py | 39 | 6259 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors',
zorder=2)
plt.scatter(X[:100], y[:100], c='k', label='data', zorder=1)
# subsequent plot calls draw on the same axes by default (plt.hold is deprecated)
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
    learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
                   scoring="neg_mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
    learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
                   scoring="neg_mean_squared_error", cv=10)
plt.plot(train_sizes, -test_scores_svr.mean(1), 'o-', color="r",
         label="SVR")
plt.plot(train_sizes, -test_scores_kr.mean(1), 'o-', color="g",
         label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
hrjn/scikit-learn | sklearn/cluster/setup.py | 79 | 1855 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.pyx'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.pyx'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means_elkan',
sources=['_k_means_elkan.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means',
libraries=cblas_libs,
sources=['_k_means.pyx'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop(
'extra_compile_args', []),
**blas_info
)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
RPGOne/Skynet | imbalanced-learn-master/examples/over-sampling/plot_random_over_sampling.py | 3 | 1909 | """
====================
Random over-sampling
====================
An illustration of the random over-sampling method.
"""
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Define some colors for the plotting
almost_black = '#262626'
palette = sns.color_palette()
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.over_sampling import RandomOverSampler
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=5000, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply the random over-sampling
ros = RandomOverSampler()
X_resampled, y_resampled = ros.fit_sample(X, y)
X_res_vis = pca.transform(X_resampled)
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')
ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
label="Class #0", alpha=.5, edgecolor=almost_black,
facecolor=palette[0], linewidth=0.15)
ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1],
label="Class #1", alpha=.5, edgecolor=almost_black,
facecolor=palette[2], linewidth=0.15)
ax2.set_title('Random over-sampling')
plt.show()
| bsd-3-clause |
walterreade/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 47 | 2486 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
evelkey/vahun | vahun/corpus.py | 1 | 11394 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
import numpy as np
import time
import pandas as pd
import random
class Corpus:
def __init__(self,abc,feature_len):
self.abc=abc
self.encoding_len=feature_len
self.maxlen=feature_len
def filter_wordlist_maxlen(self):
newwordlist=[]
if isinstance(self.wordlist[0],list):
for i in range(len(self.wordlist)):
if(len(self.wordlist[i][0])<self.maxlen and len(self.wordlist[i][1])<self.maxlen):
newwordlist.append(self.wordlist[i])
self.wordlist=newwordlist
def shuffle(self):
random.shuffle(self.wordlist)
def _getabc(self):
abc=set()
for word in self.wordlist:
if type(word) is str:
for char in word:
abc.add(char)
return "".join(sorted(abc, key=str.lower))
def featurize_data_charlevel_onehot(self,x, maxlen=0):
"""
@x: list of words
@returns the feature tensor
"""
if maxlen==0:
maxlen=self.encoding_len
self.feature_tensor = []
for dix,item in enumerate(x):
counter = 0
one_hot = np.zeros((maxlen, len(self.abc)))
if type(item) is not float:
chars = list(item)
if len(chars)<=maxlen:
for i in range(len(chars)):
if chars[i] in self.abc:
one_hot[maxlen-len(chars)+i,self.abc.find(chars[i])]=1
for i in range(maxlen-len(chars)):
one_hot[i,0]=1
self.feature_tensor.append(one_hot)
self.feature_tensor=np.asarray(self.feature_tensor)
return self.feature_tensor
def defeaturize_data_charlevel_onehot(self,x,maxlen=0):
"""
@x is the feature tensor
@returns the decoded word from the tensor
"""
if maxlen==0:
maxlen=self.encoding_len
defeaturized=[]
for item in x:
out=""
for i in range(maxlen):
out+=self.abc[item[i,:].argmax()]
defeaturized.append(out)
return defeaturized
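# Hedged usage sketch (not part of the original module): featurize_data_charlevel_onehot
# maps each word to a (maxlen, len(abc)) one-hot matrix, right-aligned and left-padded
# with the first character of `abc`; defeaturize_data_charlevel_onehot inverts it.
#
#     corpus = Corpus(abc=" abc", feature_len=5)
#     tensor = corpus.featurize_data_charlevel_onehot(["abc", "cab"])
#     # tensor.shape == (2, 5, 4)
#     corpus.defeaturize_data_charlevel_onehot(tensor)
#     # -> ["  abc", "  cab"]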
class TSV_Corpus(Corpus):
def __init__(self,
corpus_path,col=0,
size=0, #0 means all of them
encoding_len=20, #max size
printer=True,
language="Hun"):
self.encoding_len=encoding_len
self.printer=printer
self.df = pd.read_csv(corpus_path, delimiter='\t',header=None)
if size==0:
size=len(self.df[col].values.tolist())
        # take the first `size` entries from the requested column
adf=self.df.values.tolist()
self.wordlist=[adf[i][col] for i in range(0,size)]
self.abc=" " + self._getabc()
self.all_features=self.featurize_data_charlevel_onehot(self.wordlist)
train=self.all_features[0:int(len(self.all_features)*0.8)]
valid=self.all_features[int(len(self.all_features)*0.8):int(len(self.all_features)*0.9)]
test=self.all_features[int(len(self.all_features)*0.9):len(self.all_features)]
self.x_train = train.reshape((len(train), np.prod(train.shape[1:])))
self.x_valid= valid.reshape((len(valid), np.prod(valid.shape[1:])))
self.x_test = test.reshape((len(test), np.prod(test.shape[1:])))
self.df=None
self.all_features=None
class TrainXY_Corpus(Corpus):
def _getabc(self):
abc=set()
for words in self.wordlist:
for char in words[1]:
abc.add(char)
return "".join(sorted(abc, key=str.lower))
def __init__(self,
corpus_path,
size=0, #0 means all of them
encoding_len=20, #max size
printer=True,
language="Hun"):
self.encoding_len=encoding_len
self.maxlen=encoding_len
self.printer=printer
self.df = pd.read_csv(corpus_path, delimiter='\t',header=None)
if size==0:
            size=len(self.df[0].values.tolist())
# mix the words::
adf=self.df.values.tolist()
self.wordlist=[[str(adf[i][0]),str(adf[i][1])] for i in range(0,size)]
self.shuffle()
self.filter_wordlist_maxlen()
self.abc=" " + self._getabc()
self.all_features=self.featurize_data_charlevel_onehot(
[self.wordlist[i][0] for i in range(len(self.wordlist))])
train=self.all_features[0:int(len(self.all_features)*0.8)]
valid=self.all_features[int(len(self.all_features)*0.8):int(len(self.all_features)*0.9)]
test=self.all_features[int(len(self.all_features)*0.9):len(self.all_features)]
self.x_train = train.reshape((len(train), np.prod(train.shape[1:])))
self.x_valid= valid.reshape((len(valid), np.prod(valid.shape[1:])))
self.x_test = test.reshape((len(test), np.prod(test.shape[1:])))
self.all_features=self.featurize_data_charlevel_onehot(
[self.wordlist[i][1] for i in range(len(self.wordlist))])
self.df=None
train=self.all_features[0:int(len(self.all_features)*0.8)]
valid=self.all_features[int(len(self.all_features)*0.8):int(len(self.all_features)*0.9)]
test=self.all_features[int(len(self.all_features)*0.9):len(self.all_features)]
self.y_train = train.reshape((len(train), np.prod(train.shape[1:])))
self.y_valid= valid.reshape((len(valid), np.prod(valid.shape[1:])))
self.y_test = test.reshape((len(test), np.prod(test.shape[1:])))
self.df=None
self.all_features=None
class WPL_Corpus:
def __init__(self,
corpus_path,
size=4000000,
language="Hun",
needed_corpus=["unique","lower","hun_lower","lower_unique","hun_lower_unique"],
encoding_len=10,
corpus_stream=None,
printer=True,
new=True):
"""
Creates corpus object, with the given parameters
@needed_corpus: list, can contain: "unique","lower","hun_lower","lower_unique","hun_lower_unique"
"""
self.encoding_len=encoding_len
self.symbol='0123456789-,;.!?:’\”\"/\\|_@#$%^&*~`+ =<>()[]{}'
self.accents = 'áéíóöőúüű'
self.alphabet = 'abcdefghijklmnopqrstuvwxyz'
self.space = ' '
self.beginend= '^$'
if language=="Hun":
self.abc=self.space+self.alphabet+self.accents+self.beginend
self.language=language
self.embedder=np.random.normal(size=[len(self.abc),encoding_len])
self.full=[]
self.printer=printer
if corpus_stream==None:
self.corpus_path=corpus_path
self.read_all_words(size)
else:
self.corpus_path=None
self.read_stream(corpus_stream,size)
if "unique" in needed_corpus:
self.unique=list(set(self.full))
if "lower" in needed_corpus:
self.lower=self.lowercasen(self.full)
if "lower_unique" in needed_corpus:
self.lower_unique=list(set(self.lowercasen(self.full)))
if "hun_lower" in needed_corpus:
self.hun_lower=self.filter_only_words(self.lowercasen(self.full))
if "hun_lower_unique" in needed_corpus:
self.hun_lower_unique=list(set(self.filter_only_words(self.lowercasen(self.full))))
if self.printer: print("Corpus initalized, fields:",needed_corpus,"\nUnique words: ",len(set(self.full)))
def is_hun_word(self,word):
"""
@word: char sequence without spaces
@return: true if the word can be hungarian, no symbols included
"""
hun_word=True
if "eeeoddd" in word or ' ' in word or ""==word:
return False
for char in self.symbol:
if char in word:
return False
return hun_word
def read_line_wpl(self,line):
"""
Reads a line from a world per line format
@line: line in file
@return: the word
"""
return line.replace("\n","")
def lowercasen(self,list_of_words):
return [word.lower() for word in list_of_words]
def filter_only_words(self,corpus):
if self.language != "Hun":
return []
return [word for word in corpus if self.is_hun_word(word)]
def read_all_words(self,size,format="wpl"):
"""
Reads words from the specified format
@size: max number of words
"""
i=0
start=time.time()
with open(self.corpus_path,encoding='utf8') as f:
for line in f:
if i==size:
break
else:
if format=="wpl" :
self.full.append(self.read_line_wpl(line))
i+=1
if i%1000000==0:
if i!=0 and self.printer: print("Reading file, speed: ",1000000/(time.time()-start)," words/s")
start=time.time()
def read_stream(self,stream,size,format="wpl"):
"""
Reads words from the specified format
@size: max number of words
"""
i=0
start=time.time()
with stream as f:
for line in f:
if i==size:
break
else:
if format=="wpl" :
self.full.append(self.read_line_wpl(line))
i+=1
if i%1000000==0:
if i!=0 and self.printer: print("Reading file, speed: ",1000000/(time.time()-start)," words/s")
start=time.time()
def get_stat(self,corpus):
frequency=collections.Counter(corpus)
return frequency
def create_most_common_corpus(self,corpus,count):
self.most_common=[]
for item in self.get_stat(corpus).most_common(count):
self.most_common.append(item[0])
return self.most_common
def get_random_block_from_data(self,batch_size):
        start_index = np.random.randint(0, len(self.feature_tensor) - batch_size)
return self.feature_tensor[start_index:(start_index + batch_size)]
def mark_begin_end(self,word):
return "^" + word + "$"
def mark_list(self,lista):
return [self.mark_begin_end(word) for word in lista]
    def di2single(self,word):
        # replace the trigraph "dzs" first, otherwise the "zs"/"dz" rules would consume it
        word=word.replace("dzs","K")
        word=word.replace("cs","C")
        word=word.replace("ly","J")
        word=word.replace("zs","Z")
        word=word.replace("ny","N")
        word=word.replace("dz","D")
        word=word.replace("sz","S")
        word=word.replace("ty","T")
        word=word.replace("gy","G")
        return word
def digraph_2_single(self,lista):
return [self.di2single(word) for word in lista]
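# Hedged usage note (not part of the original module): di2single collapses Hungarian
# digraphs/trigraphs into single placeholder characters so that character-level
# encodings treat them as one symbol. Assuming an initialised WPL_Corpus instance `wpl`:
#
#     wpl.di2single("gyors")                  # -> "Gors"
#     wpl.digraph_2_single(["szél", "nyár"])  # -> ["Sél", "Nár"]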
| apache-2.0 |