repo_name | path | copies | size | content | license
---|---|---|---|---|---
sanketloke/scikit-learn | examples/tree/unveil_tree_structure.py | 67 | 4824 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight into the
relationship between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the ``apply`` method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %ss else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method returns a node indicator matrix: a non-zero element at position
# (i, j) indicates that sample i goes through node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's do it for a single sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
if leave_id[sample_id] == node_id:  # skip the leaf itself; its feature/threshold values are arbitrary
continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
akchinSTC/systemml | projects/breast_cancer/breastcancer/preprocessing.py | 3 | 27672 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
"""
Preprocessing -- Predicting Breast Cancer Proliferation Scores with
Apache SystemML
This module contains functions for the preprocessing phase of the
breast cancer project.
"""
import math
import os
import numpy as np
import openslide
from openslide.deepzoom import DeepZoomGenerator
import pandas as pd
from pyspark.ml.linalg import Vectors
import pyspark.sql.functions as F
from scipy.ndimage.morphology import binary_fill_holes
from skimage.color import rgb2gray
from skimage.feature import canny
from skimage.morphology import binary_closing, binary_dilation, disk
# Open Whole-Slide Image
def open_slide(slide_num, folder, training):
"""
Open a whole-slide image, given an image number.
Args:
slide_num: Slide image number as an integer.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
Returns:
An OpenSlide object representing a whole-slide image.
"""
if training:
filename = os.path.join(folder, "training_image_data",
"TUPAC-TR-{}.svs".format(str(slide_num).zfill(3)))
else:
# Testing images
filename = os.path.join(folder, "testing_image_data",
"TUPAC-TE-{}.svs".format(str(slide_num).zfill(3)))
slide = openslide.open_slide(filename)
return slide
# Create Tile Generator
def create_tile_generator(slide, tile_size, overlap):
"""
Create a tile generator for the given slide.
This generator is able to extract tiles from the overall
whole-slide image.
Args:
slide: An OpenSlide object representing a whole-slide image.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
Returns:
A DeepZoomGenerator object representing the tile generator. Each
extracted tile is a PIL Image with shape
(tile_size, tile_size, channels).
Note: This generator is not a true "Python generator function", but
rather is an object that is capable of extracting individual tiles.
"""
generator = DeepZoomGenerator(slide, tile_size=tile_size, overlap=overlap, limit_bounds=True)
return generator
# Determine 20x Magnification Zoom Level
def get_20x_zoom_level(slide, generator):
"""
Return the zoom level that corresponds to a 20x magnification.
The generator can extract tiles from multiple zoom levels,
downsampling by a factor of 2 per level from highest to lowest
resolution.
Args:
slide: An OpenSlide object representing a whole-slide image.
generator: A DeepZoomGenerator object representing a tile generator.
Note: This generator is not a true "Python generator function",
but rather is an object that is capable of extracting individual
tiles.
Returns:
Zoom level corresponding to a 20x magnification, or as close as
possible.
"""
highest_zoom_level = generator.level_count - 1 # 0-based indexing
try:
mag = int(slide.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER])
# `mag / 20` gives the downsampling factor between the slide's
# magnification and the desired 20x magnification.
# `(mag / 20) / 2` gives the zoom level offset from the highest
# resolution level, based on a 2x downsampling factor in the
# generator.
offset = math.floor((mag / 20) / 2)
level = highest_zoom_level - offset
except ValueError:
# In case the slide magnification level is unknown, just
# use the highest resolution.
level = highest_zoom_level
return level
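# Worked example of the arithmetic above (assuming a slide scanned with a 40x
# objective): offset = floor((40 / 20) / 2) = 1, so the 20x level is one below
# the highest-resolution DeepZoom level; for a 20x scan the offset is 0 and the
# highest level is used directly.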
# Generate Tile Indices For Whole-Slide Image.
def process_slide(slide_num, folder, training, tile_size, overlap):
"""
Generate all possible tile indices for a whole-slide image.
Given a slide number, tile size, and overlap, generate
all possible (slide_num, tile_size, overlap, zoom_level, col, row)
indices.
Args:
slide_num: Slide image number as an integer.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
Returns:
A list of (slide_num, tile_size, overlap, zoom_level, col, row)
integer index tuples representing possible tiles to extract.
"""
# Open slide.
slide = open_slide(slide_num, folder, training)
# Create tile generator.
generator = create_tile_generator(slide, tile_size, overlap)
# Get 20x zoom level.
zoom_level = get_20x_zoom_level(slide, generator)
# Generate all possible (zoom_level, col, row) tile index tuples.
cols, rows = generator.level_tiles[zoom_level]
tile_indices = [(slide_num, tile_size, overlap, zoom_level, col, row)
for col in range(cols) for row in range(rows)]
return tile_indices
# Generate Tile From Tile Index
def process_tile_index(tile_index, folder, training):
"""
Generate a tile from a tile index.
Given a (slide_num, tile_size, overlap, zoom_level, col, row) tile
index, generate a (slide_num, tile) tuple.
Args:
tile_index: A (slide_num, tile_size, overlap, zoom_level, col, row)
integer index tuple representing a tile to extract.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
Returns:
A (slide_num, tile) tuple, where slide_num is an integer, and tile
is a 3D NumPy array of shape (tile_size, tile_size, channels) in
RGB format.
"""
slide_num, tile_size, overlap, zoom_level, col, row = tile_index
# Open slide.
slide = open_slide(slide_num, folder, training)
# Create tile generator.
generator = create_tile_generator(slide, tile_size, overlap)
# Generate tile.
tile = np.asarray(generator.get_tile(zoom_level, (col, row)))
return (slide_num, tile)
# Filter Tile For Dimensions & Tissue Threshold
def optical_density(tile):
"""
Convert a tile to optical density values.
Args:
tile: A 3D NumPy array of shape (tile_size, tile_size, channels).
Returns:
A 3D NumPy array of shape (tile_size, tile_size, channels)
representing optical density values.
"""
tile = tile.astype(np.float64)
#od = -np.log10(tile/255 + 1e-8)
od = -np.log((tile+1)/240)
return od
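# Quick sanity check of the scale (illustrative pixel values only): a
# near-white background value of 239 gives -log((239 + 1) / 240) = 0, while a
# dark value of 50 gives -log(51 / 240) ~= 1.55, so denser tissue yields larger
# optical density values.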
def keep_tile(tile_tuple, tile_size, tissue_threshold):
"""
Determine if a tile should be kept.
This filters out tiles based on size and a tissue percentage
threshold, using a custom algorithm. If a tile has height &
width equal to (tile_size, tile_size), and contains greater
than or equal to the given percentage of tissue, then it will be kept;
otherwise it will be filtered out.
Args:
tile_tuple: A (slide_num, tile) tuple, where slide_num is an
integer, and tile is a 3D NumPy array of shape
(tile_size, tile_size, channels) in RGB format.
tile_size: The width and height of a square tile to be generated.
tissue_threshold: Tissue percentage threshold.
Returns:
A Boolean indicating whether or not a tile should be kept for
future usage.
"""
slide_num, tile = tile_tuple
if tile.shape[0:2] == (tile_size, tile_size):
tile_orig = tile
# Check 1
# Convert 3D RGB image to 2D grayscale image, from
# 0 (dense tissue) to 1 (plain background).
tile = rgb2gray(tile)
# 8-bit depth complement, from 1 (dense tissue)
# to 0 (plain background).
tile = 1 - tile
# Canny edge detection with hysteresis thresholding.
# This returns a binary map of edges, with 1 equal to
# an edge. The idea is that tissue would be full of
# edges, while background would not.
tile = canny(tile)
# Binary closing, which is a dilation followed by
# an erosion. This removes small dark spots, which
# helps remove noise in the background.
tile = binary_closing(tile, disk(10))
# Binary dilation, which enlarges bright areas,
# and shrinks dark areas. This helps fill in holes
# within regions of tissue.
tile = binary_dilation(tile, disk(10))
# Fill remaining holes within regions of tissue.
tile = binary_fill_holes(tile)
# Calculate percentage of tissue coverage.
percentage = tile.mean()
check1 = percentage >= tissue_threshold
# Check 2
# Convert to optical density values
tile = optical_density(tile_orig)
# Threshold at beta
beta = 0.15
tile = np.min(tile, axis=2) >= beta
# Apply morphology for same reasons as above.
tile = binary_closing(tile, disk(2))
tile = binary_dilation(tile, disk(2))
tile = binary_fill_holes(tile)
percentage = tile.mean()
check2 = percentage >= tissue_threshold
return check1 and check2
else:
return False
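# Illustrative behaviour on a hypothetical input: an all-white tile of the
# correct size has no Canny edges and near-zero optical density, so both
# checks fail and the tile is discarded:
#   blank = np.full((tile_size, tile_size, 3), 255, dtype=np.uint8)
#   keep_tile((1, blank), tile_size, tissue_threshold=0.9)  # -> False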
# Generate Samples From Tile
def process_tile(tile_tuple, sample_size, grayscale):
"""
Process a tile into a group of smaller samples.
Cut up a tile into smaller blocks of sample_size x sample_size pixels,
returning each block as a separate sample. (The later transpose to
(channels, H, W) and flattening to a vector of length channels*H*W is
handled by `flatten_sample`.)
Args:
tile_tuple: A (slide_num, tile) tuple, where slide_num is an
integer, and tile is a 3D NumPy array of shape
(tile_size, tile_size, channels).
sample_size: The new width and height of the square samples to be
generated.
grayscale: Whether or not to generate grayscale samples, rather
than RGB.
Returns:
A list of (slide_num, sample) tuples representing cut up tiles,
where each sample is a 3D NumPy array of shape
(sample_size_x, sample_size_y, channels).
"""
slide_num, tile = tile_tuple
if grayscale:
tile = rgb2gray(tile)[:, :, np.newaxis] # Grayscale
# Save disk space and future IO time by converting from [0,1] to [0,255],
# at the expense of some minor loss of information.
tile = np.round(tile * 255).astype("uint8")
x, y, ch = tile.shape
# 1. Reshape into a 5D array of (num_x, sample_size_x, num_y, sample_size_y, ch), where
# num_x and num_y are the number of chopped tiles on the x and y axes, respectively.
# 2. Swap sample_size_x and num_y axes to create
# (num_x, num_y, sample_size_x, sample_size_y, ch).
# 3. Combine num_x and num_y into single axis, returning
# (num_samples, sample_size_x, sample_size_y, ch).
samples = (tile.reshape((x // sample_size, sample_size, y // sample_size, sample_size, ch))
.swapaxes(1,2)
.reshape((-1, sample_size, sample_size, ch)))
samples = [(slide_num, sample) for sample in list(samples)]
return samples
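# Shape check (illustrative numbers): a (1024, 1024, 3) RGB tile with
# sample_size=256 produces (1024 // 256) ** 2 = 16 samples, i.e. an array of
# shape (16, 256, 256, 3) before it is split into the returned list of
# (slide_num, sample) tuples.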
# Normalize staining
def normalize_staining(sample_tuple, beta=0.15, alpha=1, light_intensity=255):
"""
Normalize the staining of H&E histology slides.
This function normalizes the staining of H&E histology slides.
References:
- Macenko, Marc, et al. "A method for normalizing histology slides
for quantitative analysis." Biomedical Imaging: From Nano to Macro,
2009. ISBI'09. IEEE International Symposium on. IEEE, 2009.
- http://wwwx.cs.unc.edu/~mn/sites/default/files/macenko2009.pdf
- https://github.com/mitkovetta/staining-normalization
Args:
sample_tuple: A (slide_num, sample) tuple, where slide_num is an
integer, and sample is a 3D NumPy array of shape (H,W,C).
Returns:
A (slide_num, sample) tuple, where the sample is a 3D NumPy array
of shape (H,W,C) that has been stain normalized.
"""
# Setup.
slide_num, sample = sample_tuple
x = np.asarray(sample)
h, w, c = x.shape
x = x.reshape(-1, c).astype(np.float64) # shape (H*W, C)
# Reference stain vectors and stain saturations. We will normalize all slides
# to these references. To create these, grab the stain vectors and stain
# saturations from a desirable slide.
# Values in reference implementation for use with eigendecomposition approach, natural log,
# and `light_intensity=240`.
#stain_ref = np.array([0.5626, 0.2159, 0.7201, 0.8012, 0.4062, 0.5581]).reshape(3,2)
#max_sat_ref = np.array([1.9705, 1.0308]).reshape(2,1)
# SVD w/ log10, and `light_intensity=255`.
stain_ref = (np.array([0.54598845, 0.322116, 0.72385198, 0.76419107, 0.42182333, 0.55879629])
.reshape(3,2))
max_sat_ref = np.array([0.82791151, 0.61137274]).reshape(2,1)
# Convert RGB to OD.
# Note: The original paper used log10, and the reference implementation used the natural log.
#OD = -np.log((x+1)/light_intensity) # shape (H*W, C)
OD = -np.log10(x/light_intensity + 1e-8)
# Remove data with OD intensity less than beta.
# I.e. remove transparent pixels.
# Note: This needs to be checked per channel, rather than
# taking an average over all channels for a given pixel.
OD_thresh = OD[np.all(OD >= beta, 1), :] # shape (K, C)
# Calculate eigenvectors.
# Note: We can either use eigenvector decomposition, or SVD.
#eigvals, eigvecs = np.linalg.eig(np.cov(OD_thresh.T)) # np.cov results in inf/nans
U, s, V = np.linalg.svd(OD_thresh, full_matrices=False)
# Extract two largest eigenvectors.
# Note: We swap the sign of the eigvecs here to be consistent
# with other implementations. Both +/- eigvecs are valid, with
# the same eigenvalue, so this is okay.
#top_eigvecs = eigvecs[:, np.argsort(eigvals)[-2:]] * -1
top_eigvecs = V[0:2, :].T * -1 # shape (C, 2)
# Project thresholded optical density values onto plane spanned by
# 2 largest eigenvectors.
proj = np.dot(OD_thresh, top_eigvecs) # shape (K, 2)
# Calculate angle of each point wrt the first plane direction.
# Note: the parameters are `np.arctan2(y, x)`
angles = np.arctan2(proj[:, 1], proj[:, 0]) # shape (K,)
# Find robust extremes (a and 100-a percentiles) of the angle.
min_angle = np.percentile(angles, alpha)
max_angle = np.percentile(angles, 100-alpha)
# Convert min/max vectors (extremes) back to optimal stains in OD space.
# This computes a set of axes for each angle onto which we can project
# the top eigenvectors. This assumes that the projected values have
# been normalized to unit length.
extreme_angles = np.array(
[[np.cos(min_angle), np.cos(max_angle)],
[np.sin(min_angle), np.sin(max_angle)]]
) # shape (2,2)
stains = np.dot(top_eigvecs, extreme_angles) # shape (C, 2)
# Merge vectors with hematoxylin first, and eosin second, as a heuristic.
if stains[0, 0] < stains[0, 1]:
stains[:, [0, 1]] = stains[:, [1, 0]] # swap columns
# Calculate saturations of each stain.
# Note: Here, we solve
# OD = VS
# S = V^{-1}OD
# where `OD` is the matrix of optical density values of our image,
# `V` is the matrix of stain vectors, and `S` is the matrix of stain
# saturations. Since this is an overdetermined system, we use the
# least squares solver, rather than a direct solve.
sats, _, _, _ = np.linalg.lstsq(stains, OD.T)
# Normalize stain saturations to have same pseudo-maximum based on
# a reference max saturation.
max_sat = np.percentile(sats, 99, axis=1, keepdims=True)
sats = sats / max_sat * max_sat_ref
# Compute optimal OD values.
OD_norm = np.dot(stain_ref, sats)
# Recreate image.
# Note: If the image is immediately converted to uint8 with `.astype(np.uint8)`, it will
# not return the correct values due to the initial values being outside of [0,255].
# To fix this, we round to the nearest integer, and then clip to [0,255], which is the
# same behavior as Matlab.
#x_norm = np.exp(OD_norm) * light_intensity # natural log approach
x_norm = 10**(-OD_norm) * light_intensity - 1e-8 # log10 approach
x_norm = np.clip(np.round(x_norm), 0, 255).astype(np.uint8)
x_norm = x_norm.astype(np.uint8)
x_norm = x_norm.T.reshape(h,w,c)
return (slide_num, x_norm)
def flatten_sample(sample_tuple):
"""
Flatten a (H,W,C) sample into a (C*H*W) row vector.
Transpose each sample from (H, W, channels) to (channels, H, W), then
flatten each into a vector of length channels*H*W.
Args:
sample_tuple: A (slide_num, sample) tuple, where slide_num is an
integer, and sample is a 3D NumPy array of shape (H,W,C).
Returns:
A (slide_num, sample) tuple, where the sample has been transposed
from (H,W,C) to (C,H,W), and flattened to a vector of length
(C*H*W).
"""
slide_num, sample = sample_tuple
# 1. Swap axes from (sample_size_x, sample_size_y, ch) to
# (ch, sample_size_x, sample_size_y).
# 2. Flatten sample into (ch*sample_size_x*sample_size_y).
flattened_sample = sample.transpose(2,0,1).reshape(-1)
return (slide_num, flattened_sample)
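# Illustrative sizes: a (64, 64, 3) RGB sample becomes a vector of length
# 3 * 64 * 64 = 12288, laid out channel-major (all of channel 0, then channel
# 1, then channel 2).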
# Get Ground Truth Labels
def get_labels_df(folder):
"""
Create a DataFrame with the ground truth labels for each slide.
Args:
folder: Directory containing a `training_ground_truth.csv` file
containing the ground truth "tumor_score" and "molecular_score"
labels for each slide.
Returns:
A Pandas DataFrame containing the ground truth labels for each
slide.
"""
filepath = os.path.join(folder, "training_ground_truth.csv")
labels_df = pd.read_csv(filepath, names=["tumor_score", "molecular_score"], header=None)
labels_df["slide_num"] = labels_df.index + 1 # slide numbering starts at 1
labels_df.set_index("slide_num", drop=False, inplace=True) # use the slide num as index
return labels_df
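# The CSV is assumed to contain one unlabeled row per slide, e.g. (values
# below are made up):
#   3,0.5467
#   1,0.1238
# so that labels_df.at[1, "tumor_score"] == 3 for slide number 1.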
# Process All Slides Into A Spark DataFrame
def preprocess(spark, slide_nums, folder="data", training=True, tile_size=1024, overlap=0,
tissue_threshold=0.9, sample_size=256, grayscale=False, normalize_stains=True,
num_partitions=20000):
"""
Preprocess a set of whole-slide images.
Preprocess a set of whole-slide images as follows:
1. Tile the slides into tiles of size (tile_size, tile_size, 3).
2. Filter out tiles that do not contain a sufficient amount of tissue.
3. Cut the remaining tiles into samples of size
(sample_size, sample_size, ch), where `ch` is 1 if `grayscale`
is true, or 3 otherwise.
Args:
spark: SparkSession.
slide_nums: List of whole-slide numbers to process.
folder: Local directory in which the slides folder and ground truth
file is stored, as a string. This should contain a
`training_image_data` folder with images in the format
`TUPAC-TR-###.svs`, as well as a `training_ground_truth.csv` file
containing the ground truth "tumor_score" and "molecular_score"
labels for each slide. Alternatively, the folder should contain a
`testing_image_data` folder with images in the format
`TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
tissue_threshold: Tissue percentage threshold for filtering.
sample_size: The new width and height of the square samples to be
generated.
grayscale: Whether or not to generate grayscale samples, rather
than RGB.
normalize_stains: Whether or not to apply stain normalization.
num_partitions: Number of partitions to use during processing.
Returns:
A Spark DataFrame in which each row contains the slide number, tumor
score, molecular score, and the sample stretched out into a Vector.
"""
slides = spark.sparkContext.parallelize(slide_nums)
# Create an RDD of all tile indices and increase the number of partitions
# to avoid OOM during subsequent processing.
tile_indices = (slides.flatMap(
lambda slide: process_slide(slide, folder, training, tile_size, overlap)))
# TODO: Explore computing the ideal partition sizes based on projected number
# of tiles after filtering. I.e. something like the following:
#rows = tile_indices.count()
#part_size = 128
#channels = 1 if grayscale else 3
#row_mb = tile_size * tile_size * channels * 8 / 1024 / 1024 # size of one row in MB
#rows_per_part = round(part_size / row_mb)
#num_parts = rows / rows_per_part
tile_indices = tile_indices.repartition(num_partitions)
tile_indices.cache()
# Extract all tiles into a DataFrame, filter, cut into smaller samples, apply stain
# normalization, and flatten.
tiles = tile_indices.map(lambda tile_index: process_tile_index(tile_index, folder, training))
filtered_tiles = tiles.filter(lambda tile: keep_tile(tile, tile_size, tissue_threshold))
samples = filtered_tiles.flatMap(lambda tile: process_tile(tile, sample_size, grayscale))
if normalize_stains:
samples = samples.map(lambda sample: normalize_staining(sample))
samples = samples.map(lambda sample: flatten_sample(sample))
if training:
# Append labels
labels_df = get_labels_df(folder)
samples_with_labels = (samples.map(
lambda tup: (tup[0], int(labels_df.at[tup[0],"tumor_score"]),
float(labels_df.at[tup[0],"molecular_score"]), Vectors.dense(tup[1]))))
df = samples_with_labels.toDF(["slide_num", "tumor_score", "molecular_score", "sample"])
df = df.select(df.slide_num.astype("int"), df.tumor_score.astype("int"),
df.molecular_score, df["sample"])
else: # testing data -- no labels
df = samples.toDF(["slide_num", "sample"])
df = df.select(df.slide_num.astype("int"), df["sample"])
return df
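# Hypothetical driver sketch (paths, slide numbers and partition count are
# placeholders, and an active SparkSession named `spark` is assumed):
#   df = preprocess(spark, slide_nums=list(range(1, 501)), folder="data",
#                   training=True, tile_size=1024, sample_size=256,
#                   grayscale=False, num_partitions=20000)
#   save(df, "data/samples_256.parquet", sample_size=256, grayscale=False,
#        mode="overwrite")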
# Split Into Separate Train & Validation DataFrames Based On Slide Number
def train_val_split(spark, df, slide_nums, folder, train_frac=0.8, add_row_indices=True, seed=None,
debug=False):
"""
Split a DataFrame of slide samples into training and validation sets.
Args:
spark: SparkSession.
df: A Spark DataFrame in which each row contains the slide number,
tumor score, molecular score, and the sample stretched out into
a Vector.
slide_nums: A list of slide numbers to sample from.
folder: Directory containing a `training_ground_truth.csv` file
containing the ground truth "tumor_score" and "molecular_score"
labels for each slide.
train_frac: Fraction of the data to assign to the training set, with
`1 - train_frac` assigned to the validation set.
add_row_indices: Boolean for whether or not to prepend an index
column containing the row index for use downstream by SystemML.
The column name will be "__INDEX".
Returns:
A Spark DataFrame in which each row contains the slide number, tumor
score, molecular score, and the sample stretched out into a Vector.
"""
# Create DataFrame of labels for the given slide numbers.
labels_df = get_labels_df(folder)
labels_df = labels_df.loc[slide_nums]
# Randomly split slides 80%/20% into train and validation sets.
train_nums_df = labels_df.sample(frac=train_frac, random_state=seed)
val_nums_df = labels_df.drop(train_nums_df.index)
train_nums = (spark.createDataFrame(train_nums_df)
.selectExpr("cast(slide_num as int)")
.coalesce(1))
val_nums = (spark.createDataFrame(val_nums_df)
.selectExpr("cast(slide_num as int)")
.coalesce(1))
# Note: Explicitly mark the smaller DataFrames as able to be broadcasted
# in order to have Catalyst choose the more efficient BroadcastHashJoin,
# rather than the costly SortMergeJoin.
train = df.join(F.broadcast(train_nums), on="slide_num")
val = df.join(F.broadcast(val_nums), on="slide_num")
if debug:
# DEBUG: Sanity checks.
assert len(pd.merge(train_nums_df, val_nums_df, on="slide_num")) == 0
assert train_nums.join(val_nums, on="slide_num").count() == 0
assert train.join(val, on="slide_num").count() == 0
# - Check distributions.
for pdf in train_nums_df, val_nums_df:
print(pdf.count())
print(pdf["tumor_score"].value_counts(sort=False))
print(pdf["tumor_score"].value_counts(normalize=True, sort=False), "\n")
# - Check total number of examples in each.
print(train.count(), val.count())
# - Check physical plans for broadcast join.
print(train.explain(), val.explain())
# Add row indices for use with SystemML.
if add_row_indices:
train = (train.rdd
.zipWithIndex()
.map(lambda r: (r[1] + 1, *r[0])) # flatten & convert index to 1-based indexing
.toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))
train = train.select(train["__INDEX"].astype("int"), train.slide_num.astype("int"),
train.tumor_score.astype("int"), train.molecular_score, train["sample"])
val = (val.rdd
.zipWithIndex()
.map(lambda r: (r[1] + 1, *r[0])) # flatten & convert index to 1-based indexing
.toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))
val = val.select(val["__INDEX"].astype("int"), val.slide_num.astype("int"),
val.tumor_score.astype("int"), val.molecular_score, val["sample"])
return train, val
# Save DataFrame
def save(df, filepath, sample_size, grayscale, mode="error", format="parquet", file_size=128):
"""
Save a preprocessed DataFrame with a constraint on the file sizes.
Args:
df: A Spark DataFrame.
filepath: Hadoop-supported path at which to save `df`.
sample_size: The width and height of the square samples.
grayscale: Whether or not the samples are in grayscale format,
rather than RGB.
mode: Specifies the behavior of `df.write.mode` when the data
already exists. Options include:
* `append`: Append contents of this DataFrame to
existing data.
* `overwrite`: Overwrite existing data.
* `error`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already
exists.
format: The format in which to save the DataFrame.
file_size: Size in MB of each saved file. 128 MB is an
empirically ideal size.
"""
channels = 1 if grayscale else 3
row_mb = sample_size * sample_size * channels * 8 / 1024 / 1024 # size of one row in MB
rows_per_file = round(file_size / row_mb)
df.write.option("maxRecordsPerFile", rows_per_file).mode(mode).save(filepath, format=format)
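# Worked example of the sizing above: for 256x256 RGB samples,
# row_mb = 256 * 256 * 3 * 8 / 1024 / 1024 = 1.5 MB, so
# rows_per_file = round(128 / 1.5) = 85 rows per ~128 MB file.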
| apache-2.0 |
salbrandi/patella | patella/tablereader.py | 1 | 1851 | """
This module is for reading the HTML tables that list world leaders from various webpages.
It is not very dynamic or robust, and is a priority for future updates.
More tables of FEs from countries around the world will be added so users can compare their data sets to different
world leaders.
"""
import pandas as pd
# A website with an html table to scrape for presidential term data
fe_url = 'http://www.enchantedlearning.com/history/us/pres/list.shtml'
fe_term_column = 2
header_row = 0
unique_text = 'Vice-President'
fe_table = pd.read_html(fe_url, match=unique_text, flavor='bs4', header=header_row,
index_col=fe_term_column, parse_dates=True)[0]
fe_names = []
index_list = fe_table.index.get_level_values(0).values.tolist() # slice the data frame index as a list
year_list = []
for term in index_list:
term_num = term.split('-')[0] # get the first year of the term number
year_list.append(term_num) # add it to the year list
fe_table['Year'] = year_list
fe_table.set_index('President', drop=False, inplace=True)
# Some formatting transformations are required for this table. This loop sets the fe names to only alpha chars
for item in fe_table.index.get_level_values(0).values.tolist():
fe_name = '' # reset the name
for char in item: # loop through each character to check if it is alpha-nonnumeric, and append it to a string
if char.isalpha() or char == ' ':
fe_name = fe_name + char
fe_names.append(fe_name) # append the string to the presidential names list
fe_table['President'] = fe_names # add the name list to the 'President' column of the fe dataframe
fe_table.set_index('Year', drop=True, inplace=True) # Set the index to the year column
# A simple getter function to return the fe dataframe when needed in other modules
def get_fe():
return fe_table
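# Illustrative use from another module (assuming the package layout above):
#   from patella.tablereader import get_fe
#   fe_df = get_fe()
#   fe_df['President'].head()  # presidents indexed by the first year of each term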
| mit |
yograterol/Simulated-Annealing | recocido_simulado.py | 1 | 3700 | """
Implementation of the simulated annealing algorithm
for the elective course "Computacion Emergente" (Emergent Computing).
@author Yohan Graterol <[email protected]> 2013
"""
from collections import deque
from math import exp
try:
from numpy.random import (permutation, random_sample)
from numpy import (log, matrix, array, add)
except ImportError:
from numpypy.random import (permutation, random_sample)
from numpypy import (log, matrix, array, add)
from copy import deepcopy
from random import randint
from time import time
class LoadData(object):
__slots__ = ['data', 'matrix']
file_name = 'tsp29.txt'
def __init__(self, file_name=None):
if file_name:
self.file_name = file_name
self.load_data()
def load_data(self):
tmp_file = open(self.file_name)
self.data = tmp_file.readlines()
self.data.append('0 0')
tmp_file.close()
def create_matrix(self):
self.matrix = list()
total_line = len(self.data)
for line in self.data:
line = deque(map(lambda x: int(x), line.split()))
for i in range(total_line - len(line)):
line.appendleft(0)
self.matrix.append(list(line))
self.matrix = array(self.matrix)
self.matrix = add(self.matrix, self.matrix.transpose())
class SimulatedAnnealing(object):
__slots__ = ['matrix', 'T', 't', 't_final', 'step', 'cities', 'firts_vc',
'Vc', 'Vn', 'Vc_eval', 'Vn_eval', 'alpha']
def __init__(self, T=1000, alpha=0.9899, t_final=0.001, t=1, cities=29, step=200):
data = LoadData()
data.create_matrix()
self.matrix = data.matrix
self.T = T
#self.t = t
self.t_final = t_final
self.alpha = alpha
self.cities = cities
self.Vc = None
self.firts_vc = range(self.cities)
self.step = step
#import pandas
#print pandas.DataFrame(self.matrix, range(self.cities), range(self.cities))
def tsp(self):
self.Vc = self.generate_solution()
self.Vc_eval = self.eval_solution(self.Vc)
while(self.T > self.t_final):
for i in range(self.step):
self.Vn = self.generate_solution(self.Vc)
self.Vn_eval = self.eval_solution(self.Vn)
delta = self.Vn_eval - self.Vc_eval
if delta < 0:
self.Vc = self.Vn
self.Vc_eval = self.Vn_eval
elif random_sample() < exp(-delta/self.T):
self.Vc = self.Vn
self.Vc_eval = self.Vn_eval
self.T *= self.alpha
#self.T *= self.reduce_temp(self.t)
#self.t += 1
def reduce_temp(self, t):
return self.alpha / log(1 + t)
def generate_solution(self, Vc=None):
if Vc is None:
Vn = list(permutation(self.firts_vc))
return Vn
Vn = deepcopy(Vc)
i1 = randint(0, self.cities - 1)
i2 = randint(0, self.cities - 1)
while(i1 == i2):
i2 = randint(0, self.cities - 1)
Vn[i1], Vn[i2] = Vn[i2], Vn[i1]
return Vn
def eval_solution(self, Vn):
km = 0
for c in range(len(Vn) - 1):
i = Vn[c]
j = Vn[c + 1]
km += self.matrix[i][j]
km += self.matrix[Vn[0]][Vn[self.cities - 1]]
return km
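# Illustrative computation on a hypothetical 3-city instance (cities=3): for
# the route Vn = [2, 0, 1] the length is matrix[2][0] + matrix[0][1] plus the
# closing leg matrix[Vn[0]][Vn[2]] = matrix[2][1], i.e. the full closed tour.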
def print_result(self):
print self.Vc
print self.eval_solution(self.Vc)
if __name__ == '__main__':
start = time()
tsp = SimulatedAnnealing()
tsp.tsp()
print "Resultado optimo"
tsp.print_result()
print "Tiempo: ", time() - start
| bsd-3-clause |
suttond/MODOI | ase/phasediagram.py | 2 | 20868 | from __future__ import division, print_function
import fractions
import functools
import re
import numpy as np
from scipy.spatial import ConvexHull, Delaunay
import ase.units as units
from ase.atoms import string2symbols
from ase.utils import hill
_solvated = []
def parse_formula(formula):
aq = formula.endswith('(aq)')
if aq:
formula = formula[:-4]
charge = formula.count('+') - formula.count('-')
if charge:
formula = formula.rstrip('+-')
count = {}
for symbol in string2symbols(formula):
count[symbol] = count.get(symbol, 0) + 1
return count, charge, aq
def float2str(x):
f = fractions.Fraction(x).limit_denominator(100)
n = f.numerator
d = f.denominator
if abs(n / d - x) > 1e-6:
return '{0:.3f}'.format(x)
if d == 0:
return '0'
if f.denominator == 1:
return str(n)
return '{0}/{1}'.format(f.numerator, f.denominator)
def solvated(symbols):
"""Extract solvation energies from database.
symbols: str
Extract only those molecules that contain the chemical elements
given by the symbols string (plus water and H+).
Data from:
Johnson JW, Oelkers EH, Helgeson HC (1992)
Comput Geosci 18(7):899.
doi:10.1016/0098-3004(92)90029-Q
and:
Pourbaix M (1966)
Atlas of electrochemical equilibria in aqueous solutions.
No. v. 1 in Atlas of Electrochemical Equilibria in Aqueous Solutions.
Pergamon Press, New York.
Returns list of (name, energy) tuples.
"""
if isinstance(symbols, str):
symbols = set(string2symbols(symbols))
if len(_solvated) == 0:
for line in _aqueous.splitlines():
energy, formula = line.split(',')
name = formula + '(aq)'
count, charge, aq = parse_formula(name)
energy = float(energy) * 0.001 * units.kcal / units.mol
_solvated.append((name, count, charge, aq, energy))
references = []
for name, count, charge, aq, energy in _solvated:
for symbol in count:
if symbol not in 'HO' and symbol not in symbols:
break
else:
references.append((name, energy))
return references
def bisect(A, X, Y, f):
a = []
for i in [0, -1]:
for j in [0, -1]:
if A[i, j] == -1:
A[i, j] = f(X[i], Y[j])
a.append(A[i, j])
if np.ptp(a) == 0:
A[:] = a[0]
return
if a[0] == a[1]:
A[0] = a[0]
if a[1] == a[3]:
A[:, -1] = a[1]
if a[3] == a[2]:
A[-1] = a[3]
if a[2] == a[0]:
A[:, 0] = a[2]
if not (A == -1).any():
return
i = len(X) // 2
j = len(Y) // 2
bisect(A[:i + 1, :j + 1], X[:i + 1], Y[:j + 1], f)
bisect(A[:i + 1, j:], X[:i + 1], Y[j:], f)
bisect(A[i:, :j + 1], X[i:], Y[:j + 1], f)
bisect(A[i:, j:], X[i:], Y[j:], f)
def print_results(results):
total_energy = 0.0
print('reference coefficient energy')
print('------------------------------------')
for name, coef, energy in results:
total_energy += coef * energy
if abs(coef) < 1e-7:
continue
print('{0:14}{1:>10}{2:12.3f}'.format(name, float2str(coef), energy))
print('------------------------------------')
print('Total energy: {0:22.3f}'.format(total_energy))
print('------------------------------------')
class Pourbaix:
def __init__(self, references, formula=None, T=300.0, **kwargs):
"""Pourbaix object.
references: list of (name, energy) tuples
Examples of names: ZnO2, H+(aq), H2O(aq), Zn++(aq), ...
formula: str
Stoichiometry. Example: ``'ZnO'``. Can also be given as
keyword arguments: ``Pourbaix(refs, Zn=1, O=1)``.
T: float
Temperature in Kelvin.
"""
if formula:
assert not kwargs
kwargs = parse_formula(formula)[0]
self.kT = units.kB * T
self.references = []
for name, energy in references:
if name == 'O':
continue
count, charge, aq = parse_formula(name)
for symbol in count:
if aq:
if not (symbol in 'HO' or symbol in kwargs):
break
else:
if symbol not in kwargs:
break
else:
self.references.append((count, charge, aq, energy, name))
self.references.append(({}, -1, False, 0.0, 'e-')) # an electron
self.count = kwargs
if 'O' not in self.count:
self.count['O'] = 0
self.N = {'e-': 0, 'H': 1}
for symbol in kwargs:
if symbol not in self.N:
self.N[symbol] = len(self.N)
def decompose(self, U, pH, verbose=True, concentration=1e-6):
"""Decompose material.
U: float
Potential in eV.
pH: float
pH value.
verbose: bool
Default is True.
concentration: float
Concentration of solvated references.
Returns optimal coefficients and energy.
"""
alpha = np.log(10) * self.kT
entropy = -np.log(concentration) * self.kT
# We want to minimize np.dot(energies, x) under the constraints:
#
# np.dot(x, eq2) == eq1
#
# with bounds[i,0] <= x[i] <= bounds[i, 1].
#
# First two equations are charge and number of hydrogens, and
# the rest are the remaining species.
eq1 = [0, 0] + list(self.count.values())
eq2 = []
energies = []
bounds = []
names = []
for count, charge, aq, energy, name in self.references:
eq = np.zeros(len(self.N))
eq[0] = charge
for symbol, n in count.items():
eq[self.N[symbol]] = n
eq2.append(eq)
if name in ['H2O(aq)', 'H+(aq)', 'e-']:
bounds.append((-np.inf, np.inf))
if name == 'e-':
energy = -U
elif name == 'H+(aq)':
energy = -pH * alpha
else:
bounds.append((0, 1))
if aq:
energy -= entropy
if verbose:
print('{0:<5}{1:10}{2:10.3f}'.format(len(energies),
name, energy))
energies.append(energy)
names.append(name)
try:
from scipy.optimize import linprog
except ImportError:
from ase.utils._linprog import linprog
result = linprog(energies, None, None, np.transpose(eq2), eq1, bounds)
if verbose:
print_results(zip(names, result.x, energies))
return result.x, result.fun
def diagram(self, U, pH, plot=True, show=True):
"""Calculate Pourbaix diagram.
U: list of float
Potentials in eV.
pH: list of float
pH values.
plot: bool
Create plot.
show: bool
Show plot.
"""
a = np.empty((len(U), len(pH)), int)
a[:] = -1
colors = {}
f = functools.partial(self.colorfunction, colors=colors)
bisect(a, U, pH, f)
compositions = [None] * len(colors)
names = [ref[-1] for ref in self.references]
for indices, color in colors.items():
compositions[color] = ' + '.join(names[i] for i in indices
if names[i] not in
['H2O(aq)', 'H+(aq)', 'e-'])
text = []
for i, name in enumerate(compositions):
b = (a == i)
x = np.dot(b.sum(1), U) / b.sum()
y = np.dot(b.sum(0), pH) / b.sum()
name = re.sub('(\S)([+-]+)', r'\1$^{\2}$', name)
name = re.sub('(\d+)', r'$_{\1}$', name)
text.append((x, y, name))
if plot:
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.pcolormesh(pH, U, a, cmap=cm.Accent)
for x, y, name in text:
plt.text(y, x, name, horizontalalignment='center')
plt.xlabel('pH')
plt.ylabel('potential [eV]')
plt.xlim(min(pH), max(pH))
plt.ylim(min(U), max(U))
if show:
plt.show()
return a, compositions, text
def colorfunction(self, U, pH, colors):
coefs, energy = self.decompose(U, pH, verbose=False)
indices = tuple(sorted(np.where(abs(coefs) > 1e-7)[0]))
color = colors.get(indices)
if color is None:
color = len(colors)
colors[indices] = color
return color
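# Illustrative use of the Pourbaix class (the reference energies below are
# placeholders, not real data):
#   refs = solvated('Zn') + [('Zn', 0.0), ('ZnO', -3.0), ('ZnO2(aq)', -2.5)]
#   pb = Pourbaix(refs, formula='ZnO')
#   coefs, energy = pb.decompose(U=1.0, pH=9.0)
#   a, compositions, text = pb.diagram(U=np.linspace(-2, 2, 200),
#                                      pH=np.linspace(0, 14, 300), plot=False)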
class PhaseDiagram:
def __init__(self, references, filter='', verbose=True):
"""Phase-diagram.
references: list of (name, energy) tuples
List of references. The names can also be dicts like
``{'Zn': 1, 'O': 2}`` which would be equivalent to ``'ZnO2'``.
filter: str or list of str
Use only those references that match the given filter.
Example: ``filter='ZnO'`` will select those that
contain zinc or oxygen.
verbose: bool
Write information.
"""
filter = parse_formula(filter)[0]
self.verbose = verbose
self.species = {}
self.references = []
for name, energy in references:
if isinstance(name, str):
count = parse_formula(name)[0]
else:
count = name
name = hill(count)
if filter and any(symbol not in filter for symbol in count):
continue
natoms = 0
for symbol, n in count.items():
natoms += n
if symbol not in self.species:
self.species[symbol] = len(self.species)
self.references.append((count, energy, name, natoms))
if verbose:
print('Species:', ', '.join(self.species))
print('References:', len(self.references))
for i, (count, energy, name, natoms) in enumerate(self.references):
print('{0:<5}{1:10}{2:10.3f}'.format(i, name, energy))
self.points = np.zeros((len(self.references), len(self.species) + 1))
for s, (count, energy, name, natoms) in enumerate(self.references):
for symbol, n in count.items():
self.points[s, self.species[symbol]] = n / natoms
self.points[s, -1] = energy / natoms
hull = ConvexHull(self.points[:, 1:])
# Find relevant vertices:
ok = hull.equations[:, -2] < 0
vertices = set()
for simplex in hull.simplices[ok]:
vertices.update(simplex)
self.vertices = np.array(list(vertices))
if verbose:
print('Simplices:', ok.sum())
# Create triangulation:
if len(self.species) == 2:
D = Delaunay1D # scipy's Delaunay doesn't like 1-d!
else:
D = Delaunay
self.tri = D(self.points[self.vertices, 1:-1])
def decompose(self, formula=None, **kwargs):
"""Find the combination of the references with the lowest energy.
formula: str
Stoichiometry. Example: ``'ZnO'``. Can also be given as
keyword arguments: ``decompose(Zn=1, O=1)``.
Example::
pd = PhaseDiagram(...)
pd.decompose(Zn=1, O=3)
Returns energy, indices of references and coefficients."""
if formula:
assert not kwargs
kwargs = parse_formula(formula)[0]
point = np.zeros(len(self.species))
natoms = 0
for symbol, n in kwargs.items():
point[self.species[symbol]] = n
natoms += n
i = self.tri.find_simplex(point[1:] / natoms)
indices = self.vertices[self.tri.simplices[i]]
points = self.points[indices]
scaledcoefs = np.linalg.solve(points[:, :-1].T, point)
energy = np.dot(scaledcoefs, points[:, -1])
coefs = []
results = []
for coef, s in zip(scaledcoefs, indices):
count, e, name, natoms = self.references[s]
coef /= natoms
coefs.append(coef)
results.append((name, coef, e))
if self.verbose:
print_results(results)
return energy, indices, np.array(coefs)
def plot(self):
"""Plot datapoints and convex hull.
Works only for 2 and 3 components systems.
"""
if len(self.species) == 2:
self.plot2d()
elif len(self.species) == 3:
self.plot3d()
else:
raise ValueError('...')
def plot2d(self):
import matplotlib.pyplot as plt
x, y = self.points[:, 1:].T
xsymbol = [symbol for symbol, id in self.species.items() if id == 1][0]
plt.plot(x, y, 'or')
for i, j in self.tri.simplices:
plt.plot([x[i], x[j]], [y[i], y[j]], '-g')
for count, energy, name, natoms in self.references:
name = re.sub('(\d+)', r'$_{\1}$', name)
plt.text(count.get(xsymbol, 0) / natoms, energy / natoms, name,
horizontalalignment='center', verticalalignment='bottom')
plt.xlabel(xsymbol)
plt.ylabel('energy')
plt.show()
class Delaunay1D:
"""Simple 1-d implementation."""
def __init__(self, points):
self.points = points[:, 0]
a = self.points.argsort()
self.simplices = np.array([a[:-1], a[1:]]).T
def find_simplex(self, point):
p = point[0]
for i, s in enumerate(self.simplices[:, 1]):
if p < self.points[s]:
return i
return i + 1
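# Illustrative use of PhaseDiagram (the reference energies below are
# placeholders, not real data):
#   refs = [('Zn', 0.0), ('O2', 0.0), ('ZnO', -3.5), ('ZnO2', -4.2)]
#   pd = PhaseDiagram(refs)
#   energy, indices, coefs = pd.decompose('ZnO2')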
_aqueous = """\
-525700,SiF6--
-514100,Rh(SO4)3----
-504800,Ru(SO4)3----
-499900,Pd(SO4)3----
-495200,Ru(SO4)3---
-485700,H4P2O7
-483700,Rh(SO4)3---
-483600,H3P2O7-
-480400,H2P2O7--
-480380,Pt(SO4)3----
-471400,HP2O7---
-458700,P2O7----
-447500,LaF4-
-437600,LaH2PO4++
-377900,LaF3
-376299,Ca(HSiO3)+
-370691,BeF4--
-355400,BF4-
-353025,Mg(HSiO3)+
-346900,LaSO4+
-334100,Rh(SO4)2--
-325400,Ru(SO4)2--
-319640,Pd(SO4)2--
-317900,Ru(SO4)2-
-312970,Cr2O7--
-312930,CaSO4
-307890,NaHSiO3
-307800,LaF2+
-307000,LaHCO3++
-306100,Rh(SO4)2-
-302532,BeF3-
-300670,Pt(SO4)2--
-299900,LaCO3+
-289477,MgSO4
-288400,LaCl4-
-281500,HZrO3-
-279200,HHfO3-
-276720,Sr(HCO3)+
-275700,Ba(HCO3)+
-273830,Ca(HCO3)+
-273100,H3PO4
-270140,H2PO4-
-266500,S2O8--
-264860,Sr(CO3)
-264860,SrCO3
-263830,Ba(CO3)
-263830,BaCO3
-262850,Ca(CO3)
-262850,CaCO3
-260310,HPO4--
-257600,LaCl3
-250200,Mg(HCO3)+
-249200,H3VO4
-248700,S4O6--
-246640,KSO4-
-243990,H2VO4-
-243500,PO4---
-243400,KHSO4
-242801,HSiO3-
-241700,HYO2
-241476,NaSO4-
-239700,HZrO2+
-239300,LaO2H
-238760,Mg(CO3)
-238760,MgCO3
-237800,HHfO2+
-236890,Ag(CO3)2---
-236800,HNbO3
-236600,LaF++
-235640,MnSO4
-233400,ZrO2
-233000,HVO4--
-231600,HScO2
-231540,B(OH)3
-231400,HfO2
-231386,BeF2
-231000,S2O6--
-229000,S3O6--
-229000,S5O6--
-228460,HTiO3-
-227400,YO2-
-227100,NbO3-
-226700,LaCl2+
-223400,HWO4-
-221700,LaO2-
-218500,WO4--
-218100,ScO2-
-214900,VO4---
-210000,YOH++
-208900,LaOH++
-207700,HAlO2
-206400,HMoO4-
-204800,H3PO3
-202350,H2PO3-
-202290,SrF+
-201807,BaF+
-201120,BaF+
-200400,MoO4--
-200390,CaF+
-199190,SiO2
-198693,AlO2-
-198100,YO+
-195900,LaO+
-195800,LaCl++
-194000,CaCl2
-194000,HPO3--
-191300,LaNO3++
-190400,ZrOH+++
-189000,HfOH+++
-189000,S2O5--
-187600,ZrO++
-186000,HfO++
-183700,HCrO4-
-183600,ScO+
-183100,H3AsO4
-180630,HSO4-
-180010,H2AsO4-
-177930,SO4--
-177690,MgF+
-174800,CrO4--
-173300,SrOH+
-172300,BaOH+
-172200,HBeO2-
-171300,CaOH+
-170790,HAsO4--
-166000,ReO4-
-165800,SrCl+
-165475,Al(OH)++
-165475,AlOH++
-164730,BaCl+
-164000,La+++
-163800,Y+++
-163100,CaCl+
-162240,BO2-
-158493,BeF+
-158188,AlO+
-155700,VOOH+
-155164,CdF2
-154970,AsO4---
-153500,Rh(SO4)
-152900,BeO2--
-152370,HSO5-
-151540,RuCl6---
-149255,MgOH+
-147400,H2S2O4
-146900,HS2O4-
-146081,CdCl4--
-145521,BeCl2
-145200,Ru(SO4)
-145056,PbF2
-143500,S2O4--
-140330,H2AsO3-
-140300,VO2+
-140282,HCO3-
-140200,Sc+++
-139900,BeOH+
-139700,MgCl+
-139200,Ru(SO4)+
-139000,Pd(SO4)
-138160,HF2-
-138100,HCrO2
-138000,TiO++
-137300,HGaO2
-136450,RbF
-134760,Sr++
-134030,Ba++
-133270,Zr++++
-133177,PbCl4--
-132600,Hf++++
-132120,Ca++
-129310,ZnCl3-
-128700,GaO2-
-128600,BeO
-128570,NaF
-128000,H2S2O3
-127500,Rh(SO4)+
-127200,HS2O3-
-126191,CO3--
-126130,HSO3-
-125300,CrO2-
-125100,H3PO2
-124900,S2O3--
-123641,MnF+
-122400,H2PO2-
-121000,HMnO2-
-120700,RuCl5--
-120400,MnO4--
-120300,Pt(SO4)
-119800,HInO2
-116300,SO3--
-115971,CdCl3-
-115609,Al+++
-115316,BeCl+
-112280,AgCl4---
-111670,TiO2++
-111500,VOH++
-111430,Ag(CO3)-
-110720,HZnO2-
-108505,Mg++
-108100,HSeO4-
-108000,LiOH
-107600,MnO4-
-106988,HgCl4--
-106700,InO2-
-106700,VO++
-106100,VO+
-105500,SeO4--
-105100,RbOH
-105000,CsOH
-104500,KOH
-104109,ZnF+
-103900,PdCl4--
-103579,CuCl4--
-102600,MnO2--
-102150,PbCl3-
-101850,H2SeO3
-101100,HFeO2
-100900,CsCl
-100500,CrOH++
-99900,NaOH
-99800,VOH+
-99250,LiCl
-98340,HSeO3-
-98300,ZnCl2
-97870,RbCl
-97400,HSbO2
-97300,HSnO2-
-97300,MnOH+
-97016,InF++
-96240,HAsO2
-95430,KCl
-95400,HFeO2-
-94610,CsBr
-93290,ZnO2--
-93250,RhCl4--
-92910,NaCl
-92800,CrO+
-92250,CO2
-91210,PtCl4--
-91157,FeF+
-91100,GaOH++
-91010,RbBr
-90550,Be++
-90010,KBr
-89963,CuCl3--
-89730,RuCl4-
-88400,SeO3--
-88000,FeO2-
-87373,CdF+
-86600,GaO+
-86500,HCdO2-
-86290,MnCl+
-85610,NaBr
-84851,CdCl2
-83900,RuCl4--
-83650,AsO2-
-83600,Ti+++
-83460,CsI
-83400,HCoO2-
-82710,AgCl3--
-82400,SbO2-
-81980,HNiO2-
-81732,CoF+
-81500,MnO
-81190,ZnOH+
-81000,HPbO2-
-79768,NiF+
-79645,FeF++
-79300,HBiO2
-78900,RbI
-77740,KI
-77700,La++
-77500,RhCl4-
-75860,PbF+
-75338,CuCl3-
-75216,TlF
-75100,Ti++
-74600,InOH++
-74504,HgCl3-
-73480,FeCl2
-72900,NaI
-71980,SO2
-71662,HF
-71600,RuO4--
-71200,PbCl2
-69933,Li+
-69810,PdCl3-
-69710,Cs+
-69400,InO+
-67811,AuCl3--
-67800,Rb+
-67510,K+
-67420,ZnO
-67340,F-
-67300,CdO2--
-66850,ZnCl+
-65850,FeOH+
-65550,TlOH
-64200,NiO2--
-63530,RhCl3-
-63200,CoO2--
-62591,Na+
-61700,BiO2-
-61500,CdOH+
-60100,HCuO2-
-59226,InCl++
-58600,SnOH+
-58560,RuCl3
-58038,CuCl2-
-57900,V+++
-57800,FeOH++
-57760,PtCl3-
-57600,HTlO2
-56690,H2O
-56025,CoOH+
-55100,Mn++
-54380,RuCl3-
-53950,PbOH+
-53739,CuF+
-53600,SnO
-53100,FeO+
-53030,FeCl+
-52850,NiOH+
-52627,CdCl+
-52000,V++
-51560,AgCl2-
-50720,FeO
-49459,AgF
-49300,Cr+++
-47500,CdO
-46190,RhCl3
-46142,CuCl2
-45200,HHgO2-
-45157,CoCl+
-44000,CoO
-42838,HgCl2
-41600,TlO2-
-41200,CuO2--
-40920,NiCl+
-39815,TlCl
-39400,Cr++
-39350,PbO
-39340,NiO
-39050,PbCl+
-38000,Ga+++
-37518,FeCl++
-36781,AuCl2-
-35332,AuCl4-
-35200,Zn++
-35160,PdCl2
-33970,RhCl2
-32300,BiOH++
-31700,HIO3
-31379,Cl-
-30600,IO3-
-30410,HCl
-30204,HgF+
-30200,CuOH+
-29300,BiO+
-28682,CO
-26507,NO3-
-26440,RuCl2+
-25590,Br3-
-25060,RuCl2
-24870,Br-
-24730,HNO3
-23700,HIO
-23400,In+++
-23280,OCN-
-23000,CoOH++
-22608,CuCl
-22290,PtCl2
-21900,AgOH
-21870,Fe++
-20800,CuO
-20300,Mn+++
-20058,Pb(HS)2
-19700,HBrO
-19100,HClO
-19100,ScOH++
-18990,NH4+
-18971,Pb(HS)3-
-18560,Cd++
-18290,Rh(OH)+
-17450,AgCl
-16250,CuCl+
-14780,RhCl2+
-14000,IO4-
-13130,Pd(OH)+
-13000,Co++
-12700,HgOH+
-12410,I-
-12300,I3-
-12190,Ru(OH)++
-12100,HNO2
-11500,PdO
-10900,Ni++
-10470,Ru(OH)+
-10450,RuO+
-9200,IO-
-8900,HgO
-8800,ClO-
-8000,BrO-
-7740,Tl+
-7738,AgNO3
-7700,NO2-
-7220,RhO
-6673,H2S
-6570,Sn++
-6383,NH3
-5710,Pb++
-5500,AgO-
-4500,TlOH++
-4120,Fe+++
-3380,RhCl+
-3200,TlO+
-3184,AuCl
-2155,HgCl+
-2040,ClO4-
-1900,ClO3-
-1130,PtO
-820,Rh(OH)++
0,Ag(HS)2-
0,H+
230,RuO
1400,HClO2
1560,Pt(OH)+
2429,Au(HS)2-
2500,PdCl+
2860,HS-
3140,RhO+
3215,Xe
3554,Kr
3890,Ar
4100,ClO2-
4347,N2
4450,BrO3-
4565,Ne
4658,He
5210,RuCl+
7100,RuCl++
8600,H2N2O2
9375,TlCl++
10500,HSe-
11950,Cu+
15675,Cu++
15700,S5--
16500,S4--
17600,S3--
18200,HN2O2-
18330,RhCl++
18380,PtCl+
18427,Ag+
19000,S2--
19500,SeCN-
19700,N2H5+
21100,N2H6++
22160,SCN-
22880,Bi+++
27700,Rh++
28200,BrO4-
28600,HCN
32000,Co+++
33200,N2O2--
35900,Ru++
36710,Hg2++
39360,Hg++
41200,CN-
41440,Ru+++
42200,Pd++
51300,Tl+++
52450,Rh+++
61600,Pt++
64300,Ag++
103600,Au+++"""
| lgpl-3.0 |
tomlof/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 43 | 10272 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_singular_values():
# Check that the IncrementalPCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 1000
n_features = 100
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=10, random_state=rng)
pca = PCA(n_components=10, svd_solver='full', random_state=rng).fit(X)
ipca = IncrementalPCA(n_components=10, batch_size=100).fit(X)
assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_ipca = ipca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(ipca.singular_values_**2.0),
np.linalg.norm(X_ipca, "fro")**2.0, 2)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(ipca.singular_values_,
np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = datasets.make_low_rank_matrix(n_samples, n_features, tail_strength=0.0,
effective_rank=3, random_state=rng)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
ipca = IncrementalPCA(n_components=3, batch_size=100)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
ipca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
dboonz/polymode | Polymode/Solver.py | 5 | 24557 | # _*_ coding=utf-8 _*_
#
#---------------------------------------------------------------------------------
#Copyright © 2009 Andrew Docherty
#
#This program is part of Polymode.
#Polymode is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------------
"""
Solver.py
===========
Main solve class for Polymode
Solvers
-------
AdaptiveWavelengthTrack
- Solver for calculating modes over a range of wavelengths
with an adaptive wavelength step
WavelengthScan
- Solver for calculating modes over a range of wavelengths
specifying each wavelength to solve at
WavelengthConditionScan
- Solver that returns a map of the condition number over
effective index versus the wavelength. No mode solving
is performed.
Utility functions
-----------------
batch_file_save(solvers, filename=None)
- Save solvers in batch file for later solution
batch_file_load(filename=None)
- Load solvers in from a batch file
batch_file_load_modes(filename=None)
- Load modes directly from batch file
batch_solve(solvers, filename=None)
- Solve problems in list solvers saving periodically to the specified file if given.
batch_continue(filename)
- Continue an aborted batch_solve with a batch file
export_to_nader(solver, prefix="")
- Export waveguide.in and input_paramters.in to be read by Nader's solver
"""
from __future__ import division
import logging
import numpy as np
#To be depricated, should use above imports only
from numpy import *
from . import Material, Waveguide, Equation, Modes, Plotter
# Cached random functions
class CachedRandom(object):
"""
Create
"""
def __init__(self):
self.cache = []
self.index = 0
def reset(self):
self.index = 0
def __call__(self, shape):
from scipy import random
if self.index>=len(self.cache):
self.cache += [random.random(shape)]
x = self.cache[self.index]
self.index += 1
return x
#**************************************************************************************
class Solve(object):
'''
Main solve class for VWE/SWE. Construct a solver object with
wg: The waveguide
Nres: The resolution of the calculation grid
store: Store mode field information if true
label: Dict of information to associate with the solver and mode
compress_to_size: Size to compress the stored mode fields to, or None for no compression
mode_calculations: Calculate mode information before discarding fields
'''
def __init__(self, wg, Nres=None, store=True, compress_to_size=None,
mode_calculations=False, label={}):
self.wg = wg
self.base_shape = Nres
#General solver options - All solvers should support these parameters
self.store_mode_properties = mode_calculations
self.store_fields = store
self.compress_to_size = compress_to_size
self.force_electric_calculation = False
self.dependancies = [] #Don't run this solver until these are true
self.label = label #Custom label to identify the solver
self.dtype = complex128
self.modes = []
#Setup equation with default parameters, can call it to customize
#Solver specific parameters
self.setup()
def setup(self):
#Solver specific parameters
pass
def add_dependancy(self, depends):
"Add solver to dependancy list"
depends = atleast_1d(depends)
for d in depends:
if hasattr(d,'id'):
self.dependancies.append(d.id)
elif 0<int(d)<len(Solve.ids):
self.dependancies.append(Solve.ids[d])
else:
raise LookupError("Dependency not recognised, should be a solver")
# +-----------------------------------------------------------------------+
# | General solver functions .. may be overloaded
# +-----------------------------------------------------------------------+
def plot(self):
"Plot the effective indices of the found modes"
import pylab as p_
col_red = array([0.8,0,0.2])
col_blue = array([0.2,0,0.8])
neffs = [md.neff for md in self.modes]
spurious = array([md.guess_spurious() for md in self.modes])
nconverged = array([md.residue>self.tolerance for md in self.modes])
colors = col_red*spurious[:,newaxis] + col_blue*nconverged[:,newaxis]
p_.scatter(real(neffs), imag(neffs), s=5, c=colors, marker='o')
##
## Mode information functions
##
def guess_spurious_mode(self, mode, cutoff=5.0):
'''
Guess if this is a real or spurious mode based on the mode
intensity distribution
'''
#The outermost object in the waveguide, guess if not given
router = 0.95*self.wg.get_rmax(0)
c = mode.coord
#If the coord object doesn't have a rv member or
#mode doesn't have field information this will fail
try:
#RMS of magnetic intensity over azimuthal direction
hr,ha,hz = mode.magnetic_field()
pprofile = mean(abs(hr)**2+abs(ha)**2+abs(hz)**2,axis=1)
pfrac = mean(pprofile[c.rv>router])/mean(pprofile[c.rv<router])
except:
pfrac=0
mode.is_spurious = pfrac>cutoff
return mode.is_spurious
def residue(self, x, l=None):
pass
## Interface to generic solver commands
def get_data(self):
return self.modes
def clear_data(self):
self.modes = []
def _clean_up_temporary_data(self):
"""
Remove and clean up any temporary matrices
or other data used
"""
pass
def calculate(self, number=inf):
pass
def __call__(self, *args, **kwargs):
"""
Solve the constructed problem with
m0: the waveguide symmetry index
wl: wavelength
neffrange: the upper and lower real effective indices of the search range
nefflist: Find modes near these effective indices
modelist: Find modes near these modes
totalnumber: total number of modes to find
"""
self.initialize(*args, **kwargs)
self.calculate()
self.finalize()
return self.modes
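# Example of calling a concrete Solve subclass (a sketch; ``MySolver`` and the
# parameter values are hypothetical):
#
#   solver = MySolver(wg, Nres=(200, 20))
#   modes = solver(wl=1.45, m0=1, number=3)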
def initialize(self, wl, m0=0, neffrange=None, nefflist=None, modelist=None, number=1):
'''
Setup the solver with pre-calculation parameters with:
wl: wavelength
m0: the waveguide symmetry index
neffrange: the upper and lower real effective indices of the search range
nefflist: Find modes near these effective indices
modelist: Find modes near these modes
totalnumber: total number of modes to find
'''
self.m0 = m0
self.wl = wl
self.k0 = 2*pi/wl
#Set number and neffrange depending on the case
self.numbercalculated = 0
if nefflist is not None:
self.bracket = 0,inf
self.totalnumber = len(nefflist)
elif modelist is not None:
self.bracket = 0,inf
self.totalnumber = len(modelist)
else:
#Calculate range from core index if not given
self.bracket = self.wg.index_range(wl)
self.totalnumber = number
#Or manual setting
if neffrange is not None:
if iterable(neffrange):
self.bracket = neffrange
else:
self.bracket = (self.wg.index_range(wl)[0], neffrange)
#Clear modes
self.clear_data()
#Mode/neff lists if any
self.nefflist = nefflist
self.modelist = modelist
self.is_finalized = False
def _estimate_complete_fraction(self):
"Return a number between 0 (started) and 1 (finished)"
return float(len(self.modes))/self.totalnumber
def finalize(self):
"""
Finalize the modes after the solver has finished.
Including
- Clean up temporary objects
- Delete or compress mode vectors is required
- Remove debug information if not in debugging mode
"""
#Clean up temporary data
self._clean_up_temporary_data()
logging.info("Finalizing calculated modes")
for ii,mode in enumerate(self.modes):
#Label the mode
mode.label = self.label
#Update spurious indicator
self.guess_spurious_mode(mode)
#Store the calculated electric field, recalculating it if forced
if self.store_fields:
mode.store_calculated_electric_field(wg=self.wg, force=self.force_electric_calculation)
if self.compress_to_size is not None:
mode.compress(self.compress_to_size, self.wg)
#Add extension for behaviour outside the computational domain
mode.normalize(wg=self.wg)
else:
mode.discard_fields()
#Sort modes
self.modes.sort(reverse=True)
self.is_finalized = True
class AdaptiveWavelengthTrack(Solve):
'''
Track modes over a wavelength range with adaptive step size
'''
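# Typical usage wraps an already configured single-wavelength solver (a sketch;
# the wavelength range and arguments are illustrative):
#
#   track = AdaptiveWavelengthTrack(solver)
#   modes = track((1.0, 1.6), m0=1, number=2)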
def __init__(self, solver, track_range=None, dont_lose_modes=False):
self.solver = solver
self.track_range = track_range
self.ga_target = 1e-3
self.dont_lose_modes = dont_lose_modes
Solve.__init__(self, solver.wg, compress_to_size=solver.compress_to_size)
def initialize(self, wl_range, *args, **kwargs):
self.wl_range = wl_range
self.solver_args = args
self.solver_kwargs = kwargs
#We need the m0 SC to restart the solver at different wavelengths
#This shouldn't be needed!
self.m0 = args[0] if len(args)>0 else kwargs.get('m0', 0)
def calculate(self, number=inf):
import pylab as pl
solver = self.solver
#Setup wavelength range
wl_start, wl_stop = self.wl_range
#Starting step size
dwl = (wl_stop-wl_start)/100.0
#Tolerances for adaptive step sizes
dwl_minimum = dwl/10
dwl_maximum = 5*dwl
ga_target = self.ga_target
ga_minimum = ga_target/10
ga_maximum = ga_target*10
#Find start modes to track
modes = self.solver(wl_start, *self.solver_args, **self.solver_kwargs)
#Tracking modes
Nm = len(modes)
#Bail if we can't find any modes to start with
if Nm<1:
logging.error("No modes found with intial solver parameters, wavelength track aborted")
return []
else:
logging.info("Now tracking %d modes" % Nm)
dneffdwl = zeros(Nm, complex_)
modes_track = [m.copy() for m in modes]
num_eval_backtrack = num_eval = 0
do_update=True
wl = wl_start
self.modes = list(modes)
while wl<wl_stop:
#Update wavelength
wl += dwl
logging.info("WL %.6g, step size: %.4g" % (wl,dwl))
#Find new modes
self.solver.initialize(wl, self.m0, modelist=modes_track)
modes_current = self.solver.calculate()
num_eval +=1
if 0:
m1 = modes[0]
solver.equation.set_lambda(m1.evalue)
M1x = solver.equation.matvec(m1.right) - m1.evalue*m1.right
solver.jacobian.setup(solver.base_shape,solver.wg,self.m0,wl+dwl)
solver.jacobian.set_lambda(m1.evalue)
M0px = solver.jacobian.matvec(m1.right) - m1.right
dmu = -dot(conj(m1.left), M1x)/dot(conj(m1.left), M0px)
neff_guess = sqrt(m1.evalue+dmu)/(2*pi/m1.wl)
Nm_current = len(modes_current)
if Nm_current==0: #Jump to next point and try and find modes there
continue
elif Nm_current<Nm: #Find a replacement mode?
if self.dont_lose_modes:
wl -= dwl/2
logging.warning("Lost %d modes: Retracking" % (Nm - Nm_current))
continue
else:
logging.warning("Lost %d modes" % (Nm - Nm_current))
elif Nm_current>Nm:
logging.warning("Found more modes than requested!")
#Calculate mode differences
remove_modes = []
dneffdwl_last = dneffdwl
dneffdwl = zeros(Nm_current, complex_)
ga_max = 0; ga_min = inf
for ii in range(Nm_current):
neff = modes_current[ii].neff
#Find closest neff
neff_differences = [neff - x.neff for x in modes_track]
track_closest = np.argmin(np.absolute(neff_differences))
#Calculate dispersion from previous mode
dneffdwl[ii] = (modes[track_closest].neff - neff)/dwl
#Guess accuracy
ga = abs(neff_differences[track_closest])/abs(neff)
ga_max=max(ga_max,ga); ga_min=min(ga_min,ga)
#Have the modes left the tracked range?
if self.track_range is not None and (neff<min(self.track_range) or neff>max(self.track_range)):
logging.warning("Mode has left tracked neff range")
remove_modes.append(ii)
#Adaptive guess for next dwl
accept = True
if wl>wl_start+dwl:
if ga_max>0:
dwl_target = dwl*(ga_target/ga_max)**(0.5)
if (ga_max>ga_maximum) and (dwl>dwl_minimum):
logging.info("Eigenvalue change to large. Backtracking")
accept = False
dwl_target = min(dwl_target,dwl*0.5)
dwl = dwl_target
#Guess next neff
if accept:
self.modes += modes_current
dneffdwl_last = dneffdwl
modes = modes_current
#Backtrack!!
else:
wl -= dwl
dneffdwl = dneffdwl_last
num_eval_backtrack +=1
#Use length of current modes, which must be the same as length of dneffdwl
Nm = len(modes)
#Truncate modes_track otherwise modes can be larger than modes_last
modes_track = [m.copy() for m in modes]
#Update neff for modes_track
for ii in range(Nm):
modes_track[ii].neff = (modes[ii].neff + dneffdwl[ii]*dwl)
logging.debug("Dispersion: %s " % dneffdwl)
logging.debug("Guess accuracy: %0.4g -> %0.4g" % (ga_max, ga_min))
logging.info("Total points: %d, number of backtracks: %d" % (num_eval, num_eval_backtrack))
return self.modes
def update_eigenvector(self,m1,m2):
#Calculate perturbation
# eps = 1e-3
# solver.equation.setup(solver.base_shape,solver.wg,m0,m1.wavelength+eps)
# solver.equation.set_lambda(m1.evalue)
# M1xp = solver.equation.matvec(m1.right)
# solver.equation.setup(solver.base_shape,solver.wg,m0,m1.wavelength-eps)
# solver.equation.set_lambda(m1.evalue)
# M1xm = solver.equation.matvec(m1.right)
# M1x = (M1xp-M1xm)/(2*eps)
solver.equation.setup(solver.base_shape,solver.wg,m0,m2.wavelength)
solver.equation.set_lambda(m1.evalue)
M1x = solver.equation.matvec(m1.right) - m1.evalue*m1.right
solver.jacobian.setup(solver.base_shape,solver.wg,m0,m1.wavelength)
solver.jacobian.set_lambda(m1.evalue)
M0px = solver.jacobian.matvec(m1.right) - m1.right
dmu = -dot(conj(m1.left), M1x)/dot(conj(m1.left), M0px)
dneffc1 = (m1.neff**2/m1.wavelength+0.5*dmu/m1.k0)/m1.neff
dneffc = sqrt(m1.evalue+dmu)/m2.k0 - m1.neff
print "dneff(1)", dneffc1
print "dneff(2)", dneffc
print
neff_guess += [sqrt(m1.evalue+dmu)/m2.k0]
#Find correction to eigenvector
mu2 = m2.evalue+0*dmu
Mx1 = -(M0px*dmu/delta + M1x)
#Approx:
if not hasattr(solver, 'matrix'):
Nr, Naz = solver.base_shape
bw = solver.equation.diff.bandwidth
blockshape = (solver.equation.pmax*Naz,)*2
solver.matrix = blockarray.BlockArray((Nr,bw), blockshape=blockshape, dtype=complex_)
si = Solver.ShiftInvertBlock(overwrite=False)
solver.generate()
si.set_shift(solver.matrix, complex(m1.evalue))
x1 = si.matvec(Mx1)
y = m1.right + delta*x1
solver.equation.set_lambda(m2.evalue)
print "Diff1", linalg.norm(solver.equation(y)-m2.evalue*y)
print "Diff2", linalg.norm(solver.equation(m1.right)-m2.evalue*m1.right)
def plot(self, style=''):
"""Plot the found effective index versus the wavelength for all modes
found in the wavelength scan.
Arguments:
style: the matplotlib line style for the plotted points
"""
Plotter.plot_mode_properties(self.modes, 'neff', 'wl', style=style)
def finalize(self):
#Modes should be already finalized by the subordinate solver,
#Here we should just sort them by wavelength
self.modes.sort(cmp=lambda x,y: cmp(x.wl,y.wl))
class WavelengthScan(Solve):
'''
Find all modes within a range at constant wavelength step size
'''
def __init__(self, solver, Nscan=100):
self.solver = solver
self.Nscan = Nscan
Solve.__init__(self, solver.wg, compress_to_size=solver.compress_to_size)
def initialize(self, wl_range, *args, **kwargs):
self.wl_range = wl_range
self.solver_args = args
self.solver_kwargs = kwargs
def calculate(self, number=inf):
import pylab as pl
solver = self.solver
#Setup wavelength range
wl_start, wl_stop = self.wl_range
#Step size
dwl = (wl_stop-wl_start)/self.Nscan
wl = wl_start
self.modes = []
while wl<wl_stop:
logging.info("WL %.6g, step size: %.4g" % (wl,dwl))
#Find new modes
modes_current = self.solver(wl, *self.solver_args, **self.solver_kwargs)
self.modes.extend(modes_current)
#Update wavelength
wl += dwl
return self.modes
def plot(self, style=''):
"""Plot the found effective index versus the wavelength for all modes
found in the wavelength scan.
Arguments:
style: the matplotlib line style for the plotted points
"""
Plotter.plot_mode_properties(self.modes, 'neff', 'wl', style=style)
def finalize(self):
#Modes should be already finalized by the subordinate solver,
#Here we should just sort them by wavelength
self.modes.sort(cmp=lambda x,y: cmp(x.wl,y.wl))
class WavelengthConditionScan(Solve):
'''
Scan over a wavelength range and plot a condition number for the modal
eigenvalue problem. The exact nature of this condition number depends
upon the nature of the algorithm in the supplied solver
'''
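# Sketch of use (parameter values are illustrative; the wrapped solver must
# expose a ``condition`` method):
#
#   scan = WavelengthConditionScan(solver, Nscan=(20, 100))
#   scan((1.0, 1.6), m0=1, neffrange=(1.3, 1.45))
#   scan.plot()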
def __init__(self, solver, Nscan=(20,100)):
self.solver = solver
self.Nscan = Nscan
#The condition number scan is stored here
self.Cscan = np.zeros(self.Nscan, dtype=float)
self.neffscan = np.zeros(self.Nscan, dtype=float)
self.wlscan = np.zeros(self.Nscan[0], dtype=float)
Solve.__init__(self, solver.wg, compress_to_size=solver.compress_to_size)
def initialize(self, wl_range, *args, **kwargs):
self.wl_range = wl_range
self.solver_args = args
self.solver_kwargs = kwargs
self.solver.initialize(wl_range[0], *args, **kwargs)
if 'neffrange' in kwargs:
self.neffrange = kwargs['neffrange']
else:
self.neffrange = None
def calculate(self, number=inf):
import pylab as pl
solver = self.solver
#Setup wavelengths
dwl = (self.wl_range[1]-self.wl_range[0])/self.Nscan[0]
for ii in range(self.Nscan[0]):
wl = self.wl_range[0] + ii*dwl
logging.info("Calculating scan at %d of %d points" % (ii+1, self.Nscan[0]))
#Update wavelength
self.solver.initialize(wl, *self.solver_args)
#Range to scan
if self.neffrange is None:
neffrange=self.wg.index_range(wl)
else:
neffrange=self.neffrange
dneff = (neffrange[1]-neffrange[0])/self.Nscan[1]
neffs = np.arange(neffrange[0], neffrange[1], dneff)
#Scan over beta range
self.Cscan[ii] = np.abs(self.solver.condition(neffs*self.solver.k0))
self.neffscan[ii] = neffs
self.wlscan[ii] = wl
return self.Cscan
def plot(self, style={}):
import pylab as pl
dwl = (self.wl_range[1]-self.wl_range[0])/self.Nscan[0]
wls = np.arange(self.wl_range[0], self.wl_range[1], dwl)
wlscan = self.wlscan[:,newaxis] + 0*self.neffscan
#We need to plot it twice otherwise it introduces odd lines
pl.contourf(wlscan, self.neffscan, np.log10(self.Cscan), 100, **style)
pl.contourf(wlscan, self.neffscan, np.log10(self.Cscan), 100, **style)
if 0:
pl.plot(betascan/self.solver.k0, self.Cscan[ii])
pl.pcolor(wlscan, self.neffscan, np.log10(self.Cscan), **style)
def finalize(self):
pass
def batch_file_save(solvers, filename=None):
"Save solvers in batch file for later solution"
from cPickle import dump
try:
dump(solvers, open(filename,'wb'))
except IOError:
logging.error("Failed to save solvers to file %s" % filename)
def batch_file_load(filename=None):
"Load solvers in from a batch file"
from cPickle import load
try:
solvers = load(open(filename,'rb'))
except IOError:
solvers = []
logging.error("Failed to load batch solver file %s" % filename)
return solvers
def batch_file_load_modes(filename=None, return_wg=False):
"Load modes from batch file"
solvers = batch_file_load(filename)
#Add modes to list
modes = []
wgs = []
for solver in solvers:
modes += solver.get_data() #This must return a list!
wgs.append( solver.wg )
#Return waveguides if requested
if return_wg:
return modes, wgs
else:
return modes
def batch_solve(solvers, filename=None):
"""
Solve problems in list solvers saving periodically
to the specified file if given.
The batch solve can be continued if interrupted
with the function `batch_continue(filename)`.
"""
from cPickle import dump
for solver in solvers:
#Resume calculation if not finished
if not solver.isfinished():
solver.calculate()
#Save solver queue
if filename is not None:
dump(solvers, open(filename,'wb'))
modes = []
for solver in solvers:
modes += solver.get_data()
return modes
def batch_continue(filename):
"""
Continue an aborted batch_solve with a batch file
"""
solvers = batch_file_load(filename)
return batch_solve(solvers, filename)
| gpl-3.0 |
SpatialMetabolomics/SM_distributed | sm/engine/msm_basic/msm_basic_search.py | 2 | 2848 | from collections import OrderedDict
import pandas as pd
from sm.engine.util import SMConfig
from sm.engine.msm_basic.formula_imager_segm import compute_sf_images
from sm.engine.msm_basic.formula_img_validator import sf_image_metrics
from sm.engine.search_algorithm import SearchAlgorithm
import logging
logger = logging.getLogger('engine')
class MSMBasicSearch(SearchAlgorithm):
def __init__(self, sc, ds, ds_reader, mol_db, centr_gen, fdr, ds_config):
super(MSMBasicSearch, self).__init__(sc, ds, ds_reader, mol_db, fdr, ds_config)
self.metrics = OrderedDict([('chaos', 0), ('spatial', 0), ('spectral', 0),
('total_iso_ints', [0, 0, 0, 0]),
('min_iso_ints', [0, 0, 0, 0]),
('max_iso_ints', [0, 0, 0, 0])])
self.max_fdr = 0.5
self._centr_gen = centr_gen
def search(self):
""" Search for molecules in the dataset
Returns
-------
: tuple
(ion metrics DataFrame, ion image pyspark.RDD)
"""
logger.info('Running molecule search')
ion_centroids_df = self._centr_gen.centroids_subset(self._fdr.ion_tuples())
ion_images = compute_sf_images(self._sc, self._ds_reader, ion_centroids_df,
self.ds_config['image_generation']['ppm'])
ion_metrics_df = self.calc_metrics(ion_images, ion_centroids_df)
ion_metrics_fdr_df = self.estimate_fdr(ion_metrics_df)
ion_metrics_fdr_df = self.filter_sf_metrics(ion_metrics_fdr_df)
ion_images = self.filter_sf_images(ion_images, ion_metrics_fdr_df)
return ion_metrics_fdr_df, ion_images
def calc_metrics(self, sf_images, ion_centroids_df):
ion_centr_ints = (ion_centroids_df.reset_index().groupby(['ion_i'])
.apply(lambda df: df.int.tolist()).to_dict())
all_sf_metrics_df = sf_image_metrics(sf_images=sf_images, metrics=self.metrics, ds=self._ds,
ds_reader=self._ds_reader, ion_centr_ints=ion_centr_ints, sc=self._sc)
return all_sf_metrics_df
def estimate_fdr(self, ion_metrics_df):
ion_metrics_sf_adduct_df = ion_metrics_df.join(self._centr_gen.ion_df)
sf_adduct_fdr_df = self._fdr.estimate_fdr(
ion_metrics_sf_adduct_df.set_index(['sf', 'adduct']).msm)
ion_metrics_sf_adduct_fdr_df = pd.merge(ion_metrics_sf_adduct_df.reset_index(),
sf_adduct_fdr_df.reset_index(),
how='inner', on=['sf', 'adduct']).set_index('ion_i')
return ion_metrics_sf_adduct_fdr_df
def filter_sf_metrics(self, sf_metrics_df):
return sf_metrics_df[sf_metrics_df.fdr <= self.max_fdr]
| apache-2.0 |
Obus/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variables of dim 3, 20 of them
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
srjoglekar246/sympy | doc/ext/docscrape_sphinx.py | 2 | 7957 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
## Lines that are commented out are used to make the
## autosummary:: table. Since SymPy does not use the
## autosummary:: functionality, it is easiest to just comment it
## out.
#autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
#if not self._obj or hasattr(self._obj, param):
# autosum += [" %s%s" % (prefix, param)]
#else:
others.append((param, param_type, desc))
#if autosum:
# out += ['.. autosummary::', ' :toctree:', '']
# out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
mahak/spark | python/pyspark/pandas/ml.py | 9 | 4160 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Tuple, TYPE_CHECKING, cast
import numpy as np
import pandas as pd
import pyspark
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.stat import Correlation
from pyspark.pandas._typing import Label
from pyspark.pandas.utils import column_labels_level
if TYPE_CHECKING:
import pyspark.pandas as ps # noqa: F401 (SPARK-34943)
CORRELATION_OUTPUT_COLUMN = "__correlation_output__"
def corr(psdf: "ps.DataFrame", method: str = "pearson") -> pd.DataFrame:
"""
The correlation matrix of all the numerical columns of this dataframe.
Only accepts scalar numerical values for now.
:param psdf: the pandas-on-Spark dataframe.
:param method: {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
:return: :class:`pandas.DataFrame`
>>> ps.DataFrame({'A': [0, 1], 'B': [1, 0], 'C': ['x', 'y']}).corr()
A B
A 1.0 -1.0
B -1.0 1.0
"""
assert method in ("pearson", "spearman")
ndf, column_labels = to_numeric_df(psdf)
corr = Correlation.corr(ndf, CORRELATION_OUTPUT_COLUMN, method)
pcorr = cast(pd.DataFrame, corr.toPandas())
arr = pcorr.iloc[0, 0].toArray()
if column_labels_level(column_labels) > 1:
idx = pd.MultiIndex.from_tuples(column_labels)
else:
idx = pd.Index([label[0] for label in column_labels])
return pd.DataFrame(arr, columns=idx, index=idx)
def to_numeric_df(psdf: "ps.DataFrame") -> Tuple[pyspark.sql.DataFrame, List[Label]]:
"""
Takes a dataframe and turns it into a dataframe containing a single numerical
vector of doubles. This dataframe has a single field called '_1'.
TODO: index is not preserved currently
:param psdf: the pandas-on-Spark dataframe.
:return: a pair of dataframe, list of strings (the name of the columns
that were converted to numerical types)
>>> to_numeric_df(ps.DataFrame({'A': [0, 1], 'B': [1, 0], 'C': ['x', 'y']}))
(DataFrame[__correlation_output__: vector], [('A',), ('B',)])
"""
# TODO, it should be more robust.
accepted_types = {
np.dtype(dt)
for dt in [np.int8, np.int16, np.int32, np.int64, np.float32, np.float64, np.bool_]
}
numeric_column_labels = [
label for label in psdf._internal.column_labels if psdf[label].dtype in accepted_types
]
numeric_df = psdf._internal.spark_frame.select(
*[psdf._internal.spark_column_for(idx) for idx in numeric_column_labels]
)
va = VectorAssembler(inputCols=numeric_df.columns, outputCol=CORRELATION_OUTPUT_COLUMN)
v = va.transform(numeric_df).select(CORRELATION_OUTPUT_COLUMN)
return v, numeric_column_labels
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.ml
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.ml.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = SparkSession.builder.master("local[4]").appName("pyspark.pandas.ml tests").getOrCreate()
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.ml, globs=globs, optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
OshynSong/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
JoeriHermans/ml-scripts | scripts/adverserial-variational-optimization/avo.py | 2 | 11733 |
# Adversarial Variational Optimization
import math
import numpy as np
import random
import sys
import torch
import torch.nn.functional as F
from sklearn.utils import check_random_state
from torch.autograd import Variable
def main():
# Assume there exists some true parameterization.
# Beam Energy = 43 GeV, and Fermi's Constant is 0.9
theta_true = [43.0, 0.9]
# Assume there is an experiment drawing (real) samples from nature.
p_r = real_experiment(theta_true, 100000)
# Initialize the prior of theta, parameterized by a Gaussian.
proposal = {'mu': [], 'sigma': []}
# Check if a custom mu has been specified.
if '--mu' in sys.argv:
mu = sys.argv[sys.argv.index('--mu') + 1].split(",")
mu = [float(e) for e in mu]
proposal['mu'] = mu
#proposal['sigma'] = [np.log(.1), np.log(.01)]
proposal['sigma'] = [np.log(.1), np.log(.1)]
else:
# Add random beam energy.
add_prior_beam_energy(proposal)
# Add random Fermi constant.
add_prior_fermi_constant(proposal)
# Check if a custom sigma has been specified.
if '--sigma' in sys.argv:
sigma = sys.argv[sys.argv.index('--sigma') + 1].split(",")
sigma = [np.log(float(e)) for e in sigma]
proposal['sigma'] = sigma
else:
# Initialize default sigma.
proposal['sigma'] = [np.log(.1), np.log(.1)]
# Convert the proposal lists to PyTorch Tensors.
proposal['mu'] = torch.FloatTensor(proposal['mu'])
proposal['sigma'] = torch.FloatTensor(proposal['sigma'])
# Inference on theta is done using a critic network in an adversarial setting.
if '--sigmoid' in sys.argv:
critic = CriticWithSigmoid(num_hidden=50)
else:
critic = Critic(num_hidden=50)
# Obtain the batch size from the arguments.
if '--batch-size' in sys.argv:
batch_size = int(sys.argv[sys.argv.index('--batch-size') + 1])
else:
batch_size = 256
# Check if the variables need to be normalized.
if '--normalize' in sys.argv:
proposal['mu'] = normalize(proposal['mu'])
# Fit the proposal distribution to the real distribution using the critic.
fit(proposal=proposal, p_r=p_r, critic=critic, theta_true=theta_true, batch_size=batch_size)
# Display the current parameterization of the proposal distribution.
print("\nProposal Distribution:")
print(" - Beam Energy:")
print(" mu: " + str(proposal['mu'][0]))
print(" sigma: " + str(proposal['sigma'][0]))
print(" - Fermi's Constant:")
print(" mu: " + str(proposal['mu'][1]))
print(" sigma: " + str(proposal['sigma'][1]))
print("\nTrue Distribution:")
print(" - Beam Energy: " + str(theta_true[0]))
print(" - Fermi's Constant: " + str(theta_true[1]))
def normalize(mu):
min_mu = torch.FloatTensor([30, 0])
max_mu = torch.FloatTensor([60, 2])
if '--normalize' in sys.argv:
mu = (mu - min_mu) / (max_mu - min_mu)
return mu
def denormalize(mu):
min_mu = torch.FloatTensor([30, 0])
max_mu = torch.FloatTensor([60, 2])
if '--normalize' in sys.argv:
mu = mu * (max_mu - min_mu) + min_mu
return mu
def fit(proposal, p_r, critic, theta_true, num_iterations=100000, batch_size=256):
critic_optimizer = torch.optim.Adam(critic.parameters(), lr=0.01)
for iteration in range(0, num_iterations):
print("True Mu: " + str(theta_true))
print("Current Mu: " + str(denormalize(proposal['mu'])))
print("Current Sigma: " + str(proposal['sigma'].exp()))
# Fit the critic network.
fit_critic(proposal, p_r, critic, critic_optimizer, batch_size=batch_size, num_critic_iterations=100)
# Fit the proposal distribution.
fit_proposal(proposal, p_r, critic, batch_size)
def fit_critic(proposal, p_r, critic, optimizer, num_critic_iterations=4000, batch_size=256):
# Generate the simulation data.
x_g = sample_generated_data(proposal, batch_size)
# Fit the critic optimally.
for iteration in range(0, num_critic_iterations):
# Fetch the real data.
x_r = sample_real_data(p_r, batch_size)
# Reset the gradients.
critic.zero_grad()
# Forward pass with real data.
y_r = critic(x_r)
# Forward pass with generated data.
y_g = critic(x_g)
# Obtain gradient penalty (GP).
gp = compute_gradient_penalty(critic, x_r.data, x_g.data)
# Compute the loss, and the accompanying gradients.
loss = y_g - y_r + gp
loss.mean().backward()
optimizer.step()
# Display the loss of the critic at the last step.
print("Loss: " + str(loss.mean().data.numpy()[0]))
def fit_proposal(proposal, p_r, critic, batch_size=256, gamma=5.0):
gradient_u_mu = torch.FloatTensor([0, 0])
gradient_u_sigma = torch.FloatTensor([0, 0])
gradient_entropy_sigma = torch.FloatTensor([0, 0])
# Draw several thetas from the current proposal distribution.
thetas = draw_gaussian(proposal, batch_size)
# Compute the q-gradient for every theta.
for theta in thetas:
# Draw a sample from the simulator.
x = torch.autograd.Variable(simulator(theta, 1))
likelihood_x = critic(x).mean().view(-1)
mu = torch.autograd.Variable(proposal['mu'], requires_grad=True)
sigma = torch.autograd.Variable(proposal['sigma'], requires_grad=True)
# Compute the gradient of the Gaussian logpdf.
theta = torch.autograd.Variable(normalize(theta), requires_grad=True)
logpdf = gaussian_logpdf(mu, sigma, theta)
logpdf.sum().backward()
gradient_logpdf_mu = mu.grad.data
gradient_logpdf_sigma = sigma.grad.data
# Add the logpdf gradient to the current variational upperbound.
gradient_u_mu += -likelihood_x.data * gradient_logpdf_mu
gradient_u_sigma += -likelihood_x.data * gradient_logpdf_sigma
# Compute the gradient of the entropy.
sigma = torch.autograd.Variable(proposal['sigma'], requires_grad=True)
differential_entropy = gaussian_differential_entropy(sigma)
differential_entropy.sum().backward()
gradient_entropy_sigma = sigma.grad.data
# Compute the final adversarial gradient.
gradient_u_mu = .01 * ((1. / batch_size) * gradient_u_mu)
gradient_u_sigma = .01 * ((1. / batch_size) * gradient_u_sigma + gamma * gradient_entropy_sigma)
# Apply the gradient to the proposal distribution.
proposal['mu'] -= gradient_u_mu
proposal['sigma'] -= gradient_u_sigma
#proposal['sigma'] = proposal['sigma'].exp().log() + 0.01
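# compute_gradient_penalty() implements the standard WGAN gradient penalty:
# for x_hat = eps * x_real + (1 - eps) * x_fake with eps ~ U(0, 1) it returns
#   l * (||grad_{x_hat} D(x_hat)||_2 - 1)^2,
# which encourages the critic to be 1-Lipschitz along lines between real and
# generated samples.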
def compute_gradient_penalty(critic, real, fake, l=5.0):
# Compute x_hat and its output.
epsilon = torch.rand(real.size())
x_hat = epsilon * real + ((1. - epsilon) * fake)
x_hat = torch.autograd.Variable(x_hat, requires_grad=True)
y_hat = critic(x_hat)
# Compute the associated gradients.
gradients = torch.autograd.grad(outputs=y_hat, inputs=x_hat,
grad_outputs=torch.ones(y_hat.size()),
create_graph=True, retain_graph=True, only_inputs=True)[0]
# Prevent norm 0 causing NaN.
gradients = gradients + 1e-16
# Compute the gradient penalty.
gradient_penalty = l * ((gradients.norm(2, dim=1) - 1.) ** 2)
return gradient_penalty
def sample_real_data(p_r, batch_size=256):
samples = torch.zeros((batch_size, 1))
num_samples_p_r = len(p_r)
for index in range(0, batch_size):
random_index = random.randint(0, num_samples_p_r - 1)
samples[index, :] = p_r[random_index]
return torch.autograd.Variable(samples, requires_grad=True)
def sample_generated_data(proposal, batch_size=256):
# Sample `batch_size` thetas according to our proposal distribution.
thetas = draw_gaussian(proposal, batch_size)
# Obtain the individual Gaussians.
theta_beam_energy = thetas[:, 0]
theta_fermi_constant = thetas[:, 1]
# Sample according to the proposal distribution.
samples = torch.zeros((batch_size, 1))
for sample_index, theta in enumerate(thetas):
samples[sample_index, :] = simulator(theta, 1)
return torch.autograd.Variable(samples, requires_grad=True)
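# gaussian_logpdf() evaluates, elementwise, log N(theta | mu, sigma) with the
# sigma argument stored in log-space (the proposal keeps log(sigma)):
#   log p = -log(sigma) - 0.5 * log(2 * pi) - (theta - mu)^2 / (2 * sigma^2)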
def gaussian_logpdf(mu, sigma, theta):
#sigma = sigma.exp()
#logpdf = -(sigma.log() + np.log((2. * np.pi) ** .5) + (theta - mu) ** 2 / (2. * sigma ** 2))
logpdf = -(sigma + np.log((2. * np.pi) ** .5) + (theta - mu) ** 2 / (2. * sigma.exp() ** 2))
return logpdf
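# The differential entropy of a univariate Gaussian is
#   H[N(mu, sigma)] = log(sigma * sqrt(2 * pi * e)) = 0.5 * log(2 * pi * e * sigma^2).
# Note that the function below applies this formula to its argument directly; the
# commented-out line is the variant that first exponentiates a log-sigma input.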
def gaussian_differential_entropy(sigma):
#sigma = sigma.exp()
#dentropy = (sigma.log() * (2. * np.pi * np.e) ** .5).log()
dentropy = (sigma * (2. * np.pi * np.e) ** .5).log()
return dentropy
def add_prior_beam_energy(prior):
g = random_gaussian(mu=[30, 60], sigma=1.0)
add_prior(prior, g['mu'], g['sigma'])
def add_prior_fermi_constant(prior):
g = random_gaussian(mu=[0, 2], sigma=1.0)
add_prior(prior, g['mu'], g['sigma'])
def add_prior(prior, mu, sigma):
prior['mu'].append(mu)
prior['sigma'].append(sigma)
def random_gaussian(mu=[-1, 1], sigma=5.0):
return {'mu': np.random.uniform(mu[0], mu[1]),
'sigma': np.log(np.random.uniform(0.0, sigma))}
def draw_gaussian(d, num_samples, random_state=None):
num_parameters = len(d['mu'])
thetas = torch.zeros((num_samples, num_parameters))
mu = denormalize(d['mu'])
sigma = d['sigma'].exp()
for i in range(0, num_samples):
gaussian = torch.normal(mu, sigma)
thetas[i, :] = gaussian
return thetas
def real_experiment(theta, n_samples):
return simulator(theta, n_samples)
def simulator(theta, n_samples, random_state=None):
rng = check_random_state(random_state)
samples = simulator_rej_sample_costheta(n_samples, theta, rng)
return torch.from_numpy(samples.reshape(-1, 1)).float()
def simulator_rej_sample_costheta(n_samples, theta, rng):
sqrtshalf = theta[0]
gf = theta[1]
ntrials = 0
samples = []
x = torch.linspace(-1, 1, steps=1000)
maxval = torch.max(simulator_diffxsec(x, sqrtshalf, gf))
while len(samples) < n_samples:
ntrials = ntrials + 1
xprop = rng.uniform(-1, 1)
ycut = rng.rand()
yprop = (simulator_diffxsec(xprop, sqrtshalf, gf) / maxval)[0]
if (yprop / maxval) < ycut:
continue
samples.append(xprop)
return np.array(samples)
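# The toy experiment draws cos(theta) values from the normalised angular density
#   p(c) = ((1 + c^2) + A_FB * c) / (2 * (1 + 1/3)),
# where the forward-backward asymmetry A_FB depends on the beam energy and on
# Fermi's constant (see simulator_a_fb). Sampling is done by rejection against
# the maximum of this density on [-1, 1].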
def simulator_diffxsec(costheta, sqrtshalf, gf):
norm = 2. * (1. + 1. / 3.)
return ((1 + costheta ** 2) + simulator_a_fb(sqrtshalf, gf) * costheta) / norm
def simulator_a_fb(sqrtshalf, gf):
mz = 90
gf_nom = 0.9
sqrts = sqrtshalf * 2.
x = torch.FloatTensor([(sqrts - mz) / mz * 10])
a_fb_en = torch.tanh(x)
a_fb_gf = gf / gf_nom
return 2 * a_fb_en * a_fb_gf
class Critic(torch.nn.Module):
def __init__(self, num_hidden):
super(Critic, self).__init__()
self.fc_1 = torch.nn.Linear(1, num_hidden)
self.fc_2 = torch.nn.Linear(num_hidden, num_hidden)
self.fc_3 = torch.nn.Linear(num_hidden, 1)
def forward(self, x):
x = F.relu(self.fc_1(x))
x = F.relu(self.fc_2(x))
x = (self.fc_3(x))
return x
class CriticWithSigmoid(torch.nn.Module):
def __init__(self, num_hidden):
super(CriticWithSigmoid, self).__init__()
self.fc_1 = torch.nn.Linear(1, num_hidden)
self.fc_2 = torch.nn.Linear(num_hidden, num_hidden)
self.fc_3 = torch.nn.Linear(num_hidden, 1)
def forward(self, x):
x = F.relu(self.fc_1(x))
x = F.relu(self.fc_2(x))
x = F.sigmoid(self.fc_3(x))
return x
if __name__ == '__main__':
main()
| gpl-3.0 |
HaebinShin/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 1 | 33804 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import inspect
import os
import tempfile
import time
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import checkpoints
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_setter
from tensorflow.python.training import saver
class ModeKeys(object):
"""Standard names for model modes.
The following standard keys are defined:
* `TRAIN`: training mode.
* `EVAL`: evaluation mode.
* `INFER`: inference mode.
"""
TRAIN = 'train'
EVAL = 'eval'
INFER = 'infer'
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
df = data_feeder.setup_train_data_feeder(x, y, n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
return input_fn, feed_fn
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Function returning a tuple of input and target `Tensor` objects.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
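# Illustrative call of infer_real_valued_columns_from_input_fn (a sketch only,
# assuming `import tensorflow as tf`; the constant features are made up):
#
#   def _my_input_fn():
#     features = tf.constant([[0.1, 0.2, 0.3]], dtype=tf.float32)
#     return features, None
#   feature_columns = infer_real_valued_columns_from_input_fn(_my_input_fn)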
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _get_arguments(func):
"""Returns list of arguments this function has."""
if hasattr(func, '__code__'):
# Regular function.
return inspect.getargspec(func).args
elif hasattr(func, '__call__'):
# Callable object.
return _get_arguments(func.__call__)
elif hasattr(func, 'func'):
# Partial function.
return _get_arguments(func.func)
class BaseEstimator(sklearn.BaseEstimator):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Concrete implementation of this class should provide the following functions:
* _get_train_ops
* _get_eval_ops
* _get_predict_ops
`Estimator` implemented below is a good example of how to use this class.
"""
__metaclass__ = abc.ABCMeta
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph and etc.
config: A RunConfig instance.
"""
# Model directory.
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
# Create a run configuration
if config is None:
self._config = BaseEstimator._Config()
else:
self._config = config
# Set device function depending if there are replicas or not.
if self._config.num_ps_replicas > 0:
ps_ops = ['Variable', 'AutoReloadVariable']
self._device_fn = device_setter.replica_device_setter(
ps_tasks=self._config.num_ps_replicas,
merge_devices=False, ps_ops=ps_ops)
else:
self._device_fn = None
# Features and targets TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._targets_info = None
self._graph = None
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""Trains a model given training data `x` predictions and `y` targets.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
If set, `max_steps` must be `None`.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
max_steps: Number of total steps for which to train model. If `None`,
train forever. If set, `steps` must be `None`.
        Two calls to `fit(steps=100)` mean 200 training
        iterations. On the other hand, two calls to `fit(max_steps=100)` mean
        that the second call will not do any iterations, since the first call
        did all 100 steps.
Returns:
`self`, for chaining.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
input_fn, feed_fn = _get_input_fn(x, y, input_fn, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
loss = self._train_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitors=monitors,
max_steps=max_steps)
logging.info('Loss for final step: %s.', loss)
return self
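  # Illustrative sketch of the `steps` vs `max_steps` semantics documented
  # above (`est` is a hypothetical subclass instance and `my_input_fn` an
  # assumed input function):
  #
  #   est.fit(input_fn=my_input_fn, steps=100)      # runs 100 iterations
  #   est.fit(input_fn=my_input_fn, steps=100)      # runs 100 more (200 total)
  #   est.fit(input_fn=my_input_fn, max_steps=100)  # trains up to global step 100
  #   est.fit(input_fn=my_input_fn, max_steps=100)  # no-op: step 100 already reached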
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This can be used to
    implement either iterative training or out-of-core/online training.
    It is especially useful when the whole dataset is too big to fit in
    memory at once, or when the model takes a long time to converge and
    you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
    logging.warning('The current implementation of partial_fit is not optimized'
                    ' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None):
"""Evaluates given model with provided evaluation data.
Evaluates on the given input data. If `input_fn` is provided, that
    input function should raise an end-of-input exception (`OutOfRangeError` or
    `StopIteration`) after one epoch of the evaluation data has been provided.
By default, the whole evaluation dataset is used. If `steps` is provided,
only `steps` batches of size `batch_size` are processed.
The return value is a dict containing the metrics specified in `metrics`, as
well as an entry `global_step` which contains the value of the global step
for which this evaluation was performed.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
feed_fn: Function creating a feed dict every time it is called. Called
once per iteration.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`, if specified. Must be `None` if `input_fn` is
provided.
      steps: Number of steps for which to evaluate model. If `None`, evaluate
        until the tensors generated by `metrics` raise an exception.
metrics: Dict of metric ops to run. If `None`, the default metric
functions are used; if `{}`, no metrics are used. If model has one
        output (i.e., returning single prediction), keys are `str`, e.g.
`'accuracy'` - just a name of the metric that will show up in
the logs / summaries. Otherwise, keys are tuple of two `str`, e.g.
`('accuracy', 'classes')`- name of the metric and name of `Tensor` in
the predictions to run this metric on.
Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
../../../../metrics/python/metrics/ops/streaming_metrics.py.
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data.
Returns:
Returns `dict` with evaluation results.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
        Or if `metrics` is neither `None` nor a `dict`.
"""
input_fn, feed_fn = _get_input_fn(x, y, input_fn=input_fn,
feed_fn=feed_fn, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
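  # Sketch of the two metric-key conventions accepted by `metrics` above
  # (hedged; `streaming_accuracy` stands in for any streaming metric op that
  # returns a (value, update_op) pair):
  #
  #   # Single-output model: plain string keys.
  #   est.evaluate(input_fn=eval_input_fn,
  #                metrics={'accuracy': streaming_accuracy})
  #
  #   # Multi-output model: (metric_name, prediction_key) tuple keys.
  #   est.evaluate(input_fn=eval_input_fn,
  #                metrics={('accuracy', 'classes'): streaming_accuracy})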
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
      input_fn: Input function. If set, `x` and `batch_size` must be `None`.
      batch_size: Override default batch size. If set, `input_fn` must be
        `None`.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
Returns:
Numpy array of predicted classes or regression values.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
input_fn, feed_fn = _get_input_fn(x, None, input_fn=input_fn,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
return self._infer_model(input_fn=input_fn, feed_fn=feed_fn,
outputs=outputs)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
if name.endswith(':0'):
name = name[:-2]
return checkpoints.load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in checkpoints.list_variables(self.model_dir)]
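  # Hedged usage note: variable names may be passed with or without the ':0'
  # tensor suffix; assuming a variable named 'linear/weights' exists,
  #   est.get_variable_value('linear/weights')
  #   est.get_variable_value('linear/weights:0')   # ':0' is stripped above
  # both read the same checkpointed value.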
@property
def model_dir(self):
return self._model_dir
  @abc.abstractmethod
def _get_train_ops(self, features, targets):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by subclasses that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
Returns:
Tuple of train `Operation` and loss `Tensor`.
"""
pass
  @abc.abstractmethod
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
predictions: `Tensor` or `dict` of `Tensor` objects.
"""
pass
def _get_eval_ops(self, features, targets, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by subclasses that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metric ops to run. If None, the default metric functions
are used; if {}, no metrics are used. If model has one output (i.e.,
        returning single prediction), keys are `str`, e.g. `'accuracy'` - just a
name of the metric that will show up in the logs / summaries.
Otherwise, keys are tuple of two `str`, e.g. `('accuracy', 'classes')`
- name of the metric and name of `Tensor` in the predictions to run
this metric on. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
../../../../metrics/python/metrics/ops/streaming_metrics.py.
Returns:
metrics: `dict` of `Tensor` objects.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, targets):
if self._features_info is not None:
logging.warning('Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.warning('Setting feature info to %s', str(self._features_info))
if targets is not None:
if self._targets_info is not None:
logging.warning('Given targets: %s, required signatures: %s.' %
(str(targets), str(self._targets_info)))
if not tensor_signature.tensors_compatible(targets, self._targets_info):
raise ValueError('Targets are incompatible with given information. '
'Given targets: %s, required signatures: %s.' %
(str(targets), str(self._targets_info)))
else:
self._targets_info = tensor_signature.create_signatures(targets)
logging.warning('Setting targets info to %s', str(self._targets_info))
def _train_model(self,
input_fn,
steps,
feed_fn=None,
init_op=None,
init_feed_fn=None,
init_fn=None,
device_fn=None,
monitors=None,
log_every_steps=100,
fail_on_nan_loss=True,
max_steps=None):
# TODO(wicke): Remove this once Model and associated code are gone.
if hasattr(self._config, 'execution_mode'):
if self._config.execution_mode not in ('all', 'train'):
return
# Stagger startup of worker sessions based on task id.
sleep_secs = min(
self._config.training_worker_max_startup_secs,
self._config.task *
self._config.training_worker_session_startup_stagger_secs)
if sleep_secs:
logging.info('Waiting %d secs before starting task %d.', sleep_secs,
self._config.task)
time.sleep(sleep_secs)
# Device allocation
device_fn = device_fn or self._device_fn
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, targets = input_fn()
self._check_inputs(features, targets)
train_op, loss_op = self._get_train_ops(features, targets)
# Add default monitors.
if monitors is None:
monitors = []
is_chief = self._config.task == 0
if is_chief:
monitors += monitors_lib.get_default_monitors(
loss_op=loss_op,
summary_op=logging_ops.get_summary_op(),
save_summary_steps=self._config.save_summary_steps,
summary_writer=graph_actions.get_summary_writer(self._model_dir))
else:
monitors = []
# Setup monitors.
for monitor in monitors:
monitor.set_estimator(self)
return graph_actions.train(
graph=g,
output_dir=self._model_dir,
train_op=train_op,
loss_op=loss_op,
global_step_tensor=global_step,
init_op=init_op,
init_feed_dict=init_feed_fn() if init_feed_fn is not None else None,
init_fn=init_fn,
log_every_steps=log_every_steps,
supervisor_is_chief=is_chief,
supervisor_master=self._config.master,
supervisor_save_model_secs=self._config.save_checkpoints_secs,
keep_checkpoint_max=self._config.keep_checkpoint_max,
feed_fn=feed_fn,
steps=steps,
fail_on_nan_loss=fail_on_nan_loss,
monitors=monitors,
max_steps=max_steps)
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in eval_dict.items():
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
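  # Sketch of the eval_dict shapes handled above:
  #   {'accuracy': (value_tensor, update_op)}  -> value op kept, update op grouped
  #   {'loss': loss_tensor}                    -> treated as a plain value op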
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name=''):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained.
checkpoint_path = self._model_dir
latest_path = saver.latest_checkpoint(checkpoint_path)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% checkpoint_path)
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, targets = input_fn()
self._check_inputs(features, targets)
eval_dict = self._get_eval_ops(features, targets, metrics)
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
eval_results, current_global_step = graph_actions.evaluate(
graph=g,
output_dir=eval_dir,
checkpoint_path=checkpoint_path,
eval_dict=eval_dict,
update_op=update_op,
global_step_tensor=global_step,
supervisor_master=self._config.master,
feed_fn=feed_fn,
max_steps=steps)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self, input_fn, feed_fn=None, outputs=None):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
predictions = self._get_predict_ops(features)
# If predictions is single output - wrap it into dict, and remember to
# return not a dict.
return_dict = True
if not isinstance(predictions, dict):
predictions, return_dict = {'predictions': predictions}, False
# Filter what to run predictions on, if outputs provided.
if outputs:
existing_keys = predictions.keys()
predictions = {
key: value for key, value in predictions.items() if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
if feed_fn is None:
preds = graph_actions.infer(checkpoint_path, predictions)
else:
preds = {}
def _feed_fn():
while True:
yield feed_fn()
outputs = graph_actions.run_feeds(
output_dict=predictions,
feed_dicts=_feed_fn(),
restore_checkpoint_path=checkpoint_path)
for key in predictions:
preds[key] = np.concatenate(
[output[key] for output in outputs], axis=0)
if return_dict:
return preds
return preds['predictions']
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None):
"""Constructs an Estimator instance.
Args:
model_fn: Model function, takes features and targets tensors or dicts of
tensors and returns predictions and loss tensors.
Supports next three signatures for the function:
* `(features, targets) -> (predictions, loss, train_op)`
* `(features, targets, mode) -> (predictions, loss, train_op)`
* `(features, targets, mode, params) -> (predictions, loss, train_op)`
Where
* `features` are single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `targets` are `Tensor` or
`dict` of `Tensor`s (for multi-head model).
        * `mode` specifies if this is training, evaluation or
          prediction. See `ModeKeys` for example keys.
* `params` is a `dict` of hyperparameters. Will receive what
          is passed to Estimator in `params` parameter. This allows
          configuring Estimators from hyper parameter tuning.
      model_dir: Directory to save model parameters, graph, etc.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _get_arguments(model_fn)
if params is not None and 'params' not in model_fn_args:
        raise ValueError('Estimator\'s model_fn (%s) does not include a '
                         'params argument, but params (%s) were passed.' %
                         (model_fn, params))
if params is None and 'params' in model_fn_args:
        logging.warning('Estimator\'s model_fn (%s) includes a params '
                        'argument, but params are not passed to Estimator.' %
                        model_fn)
self._model_fn = model_fn
self.params = params
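  # Minimal sketch of a model_fn matching the signatures documented above
  # (hedged; `my_network`, `my_loss` and `my_optimizer` are illustrative
  # placeholders, not part of this module):
  #
  #   def my_model_fn(features, targets, mode, params):
  #     predictions = my_network(features)
  #     loss = my_loss(predictions, targets)
  #     train_op = my_optimizer(params['learning_rate']).minimize(loss)
  #     return predictions, loss, train_op
  #
  #   est = Estimator(model_fn=my_model_fn, params={'learning_rate': 0.01})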
def _call_model_fn(self, features, targets, mode):
"""Calls model function with support of 2, 3 or 4 arguments."""
model_fn_args = _get_arguments(self._model_fn)
if 'mode' in model_fn_args:
if 'params' in model_fn_args:
return self._model_fn(features, targets, mode=mode, params=self.params)
else:
return self._model_fn(features, targets, mode=mode)
return self._model_fn(features, targets)
def _get_train_ops(self, features, targets):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by subclasses that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
Returns:
Tuple of train `Operation` and loss `Tensor`.
"""
_, loss, train_op = self._call_model_fn(features, targets, ModeKeys.TRAIN)
return train_op, loss
def _get_eval_ops(self, features, targets, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by subclasses that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metric ops to run. If None, the default metric functions
are used; if {}, no metrics are used. If model has one output (i.e.,
        returning single prediction), keys are `str`, e.g. `'accuracy'` - just a
name of the metric that will show up in the logs / summaries.
Otherwise, keys are tuple of two `str`, e.g. `('accuracy', 'classes')`
- name of the metric and name of `Tensor` in the predictions to run
this metric on. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
../../../../metrics/python/metrics/ops/streaming_metrics.py.
Returns:
metrics: `dict` of `Tensor` objects.
Raises:
ValueError: if `metrics` don't match `targets`.
"""
predictions, loss, _ = self._call_model_fn(features, targets, ModeKeys.EVAL)
result = {'loss': loss}
metrics = metrics or {}
if isinstance(targets, dict) and len(targets) == 1:
# Unpack single target into just tensor.
targets = targets[list(targets.keys())[0]]
for name, metric in six.iteritems(metrics):
if isinstance(name, tuple):
# Multi-head metrics.
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: targets are single Tensor or a dict.
if isinstance(targets, dict) and name[1] in targets:
# If targets are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], targets[name[1]])
else:
# Otherwise pass the targets to the metric.
result[name[0]] = metric(predictions[name[1]], targets)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Targets: %s.' % (metrics, targets))
result[name] = metric(predictions, targets)
return result
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by subclasses that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
predictions: `Tensor` or `dict` of `Tensor` objects.
"""
targets = tensor_signature.create_placeholders_from_signatures(
self._targets_info)
predictions, _, _ = self._call_model_fn(features, targets, ModeKeys.INFER)
return predictions
| apache-2.0 |
alexmojaki/blaze | blaze/compute/tests/test_numpy_compute.py | 6 | 16540 | from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
import pandas as pd
from datetime import datetime, date
from blaze.compute.core import compute, compute_up
from blaze.expr import symbol, by, exp, summary, Broadcast, join, concat
from blaze import sin
from odo import into
from datashape import discover, to_numpy, dshape
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', 'i8'), ('name', 'S7'), ('amount', 'i8')])
t = symbol('t', discover(x))
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
return c.all()
return c
def test_symbol():
assert eq(compute(t, x), x)
def test_eq():
assert eq(compute(t['amount'] == 100, x),
x['amount'] == 100)
def test_selection():
    assert eq(compute(t[t['amount'] == 100], x), x[x['amount'] == 100])
assert eq(compute(t[t['amount'] < 0], x), x[x['amount'] < 0])
def test_arithmetic():
assert eq(compute(t['amount'] + t['id'], x),
x['amount'] + x['id'])
assert eq(compute(t['amount'] * t['id'], x),
x['amount'] * x['id'])
assert eq(compute(t['amount'] % t['id'], x),
x['amount'] % x['id'])
def test_UnaryOp():
assert eq(compute(exp(t['amount']), x),
np.exp(x['amount']))
assert eq(compute(abs(-t['amount']), x),
abs(-x['amount']))
def test_Neg():
assert eq(compute(-t['amount'], x),
-x['amount'])
def test_invert_not():
assert eq(compute(~(t.amount > 0), x),
~(x['amount'] > 0))
def test_Reductions():
assert compute(t['amount'].mean(), x) == x['amount'].mean()
assert compute(t['amount'].count(), x) == len(x['amount'])
assert compute(t['amount'].sum(), x) == x['amount'].sum()
assert compute(t['amount'].min(), x) == x['amount'].min()
assert compute(t['amount'].max(), x) == x['amount'].max()
assert compute(t['amount'].nunique(), x) == len(np.unique(x['amount']))
assert compute(t['amount'].var(), x) == x['amount'].var()
assert compute(t['amount'].std(), x) == x['amount'].std()
assert compute(t['amount'].var(unbiased=True), x) == x['amount'].var(ddof=1)
assert compute(t['amount'].std(unbiased=True), x) == x['amount'].std(ddof=1)
assert compute((t['amount'] > 150).any(), x) == True
assert compute((t['amount'] > 250).all(), x) == False
assert compute(t['amount'][0], x) == x['amount'][0]
assert compute(t['amount'][-1], x) == x['amount'][-1]
def test_count_string():
s = symbol('name', 'var * ?string')
x = np.array(['Alice', np.nan, 'Bob', 'Denis', 'Edith'], dtype='object')
assert compute(s.count(), x) == 4
def test_reductions_on_recarray():
assert compute(t.count(), x) == len(x)
def test_count_nan():
t = symbol('t', '3 * ?real')
x = np.array([1.0, np.nan, 2.0])
assert compute(t.count(), x) == 2
def test_distinct():
x = np.array([('Alice', 100),
('Alice', -200),
('Bob', 100),
('Bob', 100)],
dtype=[('name', 'S5'), ('amount', 'i8')])
t = symbol('t', 'var * {name: string, amount: int64}')
assert eq(compute(t['name'].distinct(), x),
np.unique(x['name']))
assert eq(compute(t.distinct(), x),
np.unique(x))
def test_distinct_on_recarray():
rec = pd.DataFrame(
[[0, 1],
[0, 2],
[1, 1],
[1, 2]],
columns=('a', 'b'),
).to_records(index=False)
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[[0, 1],
[1, 1]],
columns=('a', 'b'),
).to_records(index=False)
).all()
def test_distinct_on_structured_array():
arr = np.array(
[(0., 1.),
(0., 2.),
(1., 1.),
(1., 2.)],
dtype=[('a', 'f4'), ('b', 'f4')],
)
s = symbol('s', discover(arr))
assert(
compute(s.distinct('a'), arr) ==
np.array([(0., 1.), (1., 1.)], dtype=arr.dtype)
).all()
def test_distinct_on_str():
rec = pd.DataFrame(
[['a', 'a'],
['a', 'b'],
['b', 'a'],
['b', 'b']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[['a', 'a'],
['b', 'a']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
).all()
def test_sort():
assert eq(compute(t.sort('amount'), x),
np.sort(x, order='amount'))
assert eq(compute(t.sort('amount', ascending=False), x),
np.sort(x, order='amount')[::-1])
assert eq(compute(t.sort(['amount', 'id']), x),
np.sort(x, order=['amount', 'id']))
assert eq(compute(t.amount.sort(), x),
np.sort(x['amount']))
def test_head():
assert eq(compute(t.head(2), x),
x[:2])
def test_tail():
assert eq(compute(t.tail(2), x),
x[-2:])
def test_label():
expected = x['amount'] * 10
expected = np.array(expected, dtype=[('foo', 'i8')])
assert eq(compute((t['amount'] * 10).label('foo'), x),
expected)
def test_relabel():
expected = np.array(x, dtype=[('ID', 'i8'), ('NAME', 'S7'), ('amount', 'i8')])
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), x)
assert result.dtype.names == expected.dtype.names
assert eq(result, expected)
def test_by():
expr = by(t.amount > 0, count=t.id.count())
result = compute(expr, x)
assert set(map(tuple, into(list, result))) == set([(False, 2), (True, 3)])
def test_compute_up_field():
assert eq(compute(t['name'], x), x['name'])
def test_compute_up_projection():
assert eq(compute_up(t[['name', 'amount']], x), x[['name', 'amount']])
ax = np.arange(30, dtype='f4').reshape((5, 3, 2))
a = symbol('a', discover(ax))
def test_slice():
inds = [0, slice(2), slice(1, 3), slice(None, None, 2), [1, 2, 3],
(0, 1), (0, slice(1, 3)), (slice(0, 3), slice(3, 1, -1)),
(0, [1, 2])]
for s in inds:
assert (compute(a[s], ax) == ax[s]).all()
def test_array_reductions():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis), ax), ax.sum(axis=axis))
assert eq(compute(a.std(axis=axis), ax), ax.std(axis=axis))
def test_array_reductions_with_keepdims():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis, keepdims=True), ax),
ax.sum(axis=axis, keepdims=True))
def test_summary_on_ndarray():
assert compute(summary(total=a.sum(), min=a.min()), ax) == \
(ax.min(), ax.sum())
result = compute(summary(total=a.sum(), min=a.min(), keepdims=True), ax)
expected = np.array([(ax.min(), ax.sum())],
dtype=[('min', 'float32'), ('total', 'float64')])
assert result.ndim == ax.ndim
assert eq(expected, result)
def test_summary_on_ndarray_with_axis():
for axis in [0, 1, (1, 0)]:
expr = summary(total=a.sum(), min=a.min(), axis=axis)
result = compute(expr, ax)
shape, dtype = to_numpy(expr.dshape)
expected = np.empty(shape=shape, dtype=dtype)
expected['total'] = ax.sum(axis=axis)
expected['min'] = ax.min(axis=axis)
assert eq(result, expected)
def test_utcfromtimestamp():
t = symbol('t', '1 * int64')
data = np.array([0, 1])
expected = np.array(['1970-01-01T00:00:00Z', '1970-01-01T00:00:01Z'],
dtype='M8[us]')
assert eq(compute(t.utcfromtimestamp, data), expected)
def test_nelements_structured_array():
assert compute(t.nelements(), x) == len(x)
assert compute(t.nelements(keepdims=True), x) == (len(x),)
def test_nelements_array():
t = symbol('t', '5 * 4 * 3 * float64')
x = np.random.randn(*t.shape)
result = compute(t.nelements(axis=(0, 1)), x)
np.testing.assert_array_equal(result, np.array([20, 20, 20]))
result = compute(t.nelements(axis=1), x)
np.testing.assert_array_equal(result, 4 * np.ones((5, 3)))
def test_nrows():
assert compute(t.nrows, x) == len(x)
dts = np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:05Z'],
dtype='M8[us]')
s = symbol('s', 'var * datetime')
def test_datetime_truncation():
assert eq(compute(s.truncate(1, 'day'), dts),
dts.astype('M8[D]'))
assert eq(compute(s.truncate(2, 'seconds'), dts),
np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:04Z'],
dtype='M8[s]'))
assert eq(compute(s.truncate(2, 'weeks'), dts),
np.array(['2000-06-18', '2000-06-18'], dtype='M8[D]'))
assert into(list, compute(s.truncate(1, 'week'), dts))[0].isoweekday() == 7
def test_hour():
dts = [datetime(2000, 6, 20, 1, 00, 00),
datetime(2000, 6, 20, 12, 59, 59),
datetime(2000, 6, 20, 12, 00, 00),
datetime(2000, 6, 20, 11, 59, 59)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'hour'), dts),
into(np.ndarray, [datetime(2000, 6, 20, 1, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 11, 0)]))
def test_month():
dts = [datetime(2000, 7, 1),
datetime(2000, 6, 30),
datetime(2000, 6, 1),
datetime(2000, 5, 31)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'month'), dts),
into(np.ndarray, [date(2000, 7, 1),
date(2000, 6, 1),
date(2000, 6, 1),
date(2000, 5, 1)]))
def test_truncate_on_np_datetime64_scalar():
s = symbol('s', 'datetime')
data = np.datetime64('2000-01-02T12:30:00Z')
assert compute(s.truncate(1, 'day'), data) == data.astype('M8[D]')
def test_numpy_and_python_datetime_truncate_agree_on_start_of_week():
s = symbol('s', 'datetime')
n = np.datetime64('2014-11-11')
p = datetime(2014, 11, 11)
expr = s.truncate(1, 'week')
assert compute(expr, n) == compute(expr, p)
def test_add_multiple_ndarrays():
a = symbol('a', '5 * 4 * int64')
b = symbol('b', '5 * 4 * float32')
x = np.arange(9, dtype='int64').reshape(3, 3)
y = (x + 1).astype('float32')
expr = sin(a) + 2 * b
scope = {a: x, b: y}
expected = sin(x) + 2 * y
# check that we cast correctly
assert expr.dshape == dshape('5 * 4 * float64')
np.testing.assert_array_equal(compute(expr, scope), expected)
np.testing.assert_array_equal(compute(expr, scope, optimize=False),
expected)
nA = np.arange(30, dtype='f4').reshape((5, 6))
ny = np.arange(6, dtype='f4')
A = symbol('A', discover(nA))
y = symbol('y', discover(ny))
def test_transpose():
assert eq(compute(A.T, nA), nA.T)
assert eq(compute(A.transpose((0, 1)), nA), nA)
def test_dot():
assert eq(compute(y.dot(y), {y: ny}), np.dot(ny, ny))
assert eq(compute(A.dot(y), {A: nA, y: ny}), np.dot(nA, ny))
def test_subexpr_datetime():
data = pd.date_range(start='01/01/2010', end='01/04/2010', freq='D').values
s = symbol('s', discover(data))
result = compute(s.truncate(days=2).day, data)
expected = np.array([31, 2, 2, 4])
np.testing.assert_array_equal(result, expected)
def test_mixed_types():
x = np.array([[(4, 180), (4, 184), (4, 188), (4, 192), (4, 196)],
[(4, 660), (4, 664), (4, 668), (4, 672), (4, 676)],
[(4, 1140), (4, 1144), (4, 1148), (4, 1152), (4, 1156)],
[(4, 1620), (4, 1624), (4, 1628), (4, 1632), (4, 1636)],
[(4, 2100), (4, 2104), (4, 2108), (4, 2112), (4, 2116)]],
dtype=[('count', '<i4'), ('total', '<i8')])
aggregate = symbol('aggregate', discover(x))
result = compute(aggregate.total.sum(axis=(0,)) /
aggregate['count'].sum(axis=(0,)), x)
expected = (x['total'].sum(axis=0, keepdims=True) /
x['count'].sum(axis=0, keepdims=True)).squeeze()
np.testing.assert_array_equal(result, expected)
def test_broadcast_compute_against_numbers_and_arrays():
A = symbol('A', '5 * float32')
a = symbol('a', 'float32')
b = symbol('b', 'float32')
x = np.arange(5, dtype='f4')
expr = Broadcast((A, b), (a, b), a + b)
result = compute(expr, {A: x, b: 10})
assert eq(result, x + 10)
def test_map():
pytest.importorskip('numba')
a = np.arange(10.0)
f = lambda x: np.sin(x) + 1.03 * np.cos(x) ** 2
x = symbol('x', discover(a))
expr = x.map(f, 'float64')
result = compute(expr, a)
expected = f(a)
# make sure we're not going to pandas here
assert type(result) == np.ndarray
assert type(result) == type(expected)
np.testing.assert_array_equal(result, expected)
def test_vector_norm():
x = np.arange(30).reshape((5, 6))
s = symbol('x', discover(x))
assert eq(compute(s.vnorm(), x),
np.linalg.norm(x))
assert eq(compute(s.vnorm(ord=1), x),
np.linalg.norm(x.flatten(), ord=1))
assert eq(compute(s.vnorm(ord=4, axis=0), x),
np.linalg.norm(x, ord=4, axis=0))
expr = s.vnorm(ord=4, axis=0, keepdims=True)
assert expr.shape == compute(expr, x).shape
def test_join():
cities = np.array([('Alice', 'NYC'),
('Alice', 'LA'),
('Bob', 'Chicago')],
dtype=[('name', 'S7'), ('city', 'O')])
c = symbol('cities', discover(cities))
expr = join(t, c, 'name')
result = compute(expr, {t: x, c: cities})
assert (b'Alice', 1, 100, 'LA') in into(list, result)
def test_query_with_strings():
b = np.array([('a', 1), ('b', 2), ('c', 3)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
assert compute(s[s.x == b'b'], b).tolist() == [(b'b', 2)]
@pytest.mark.parametrize('keys', [['a'], list('bc')])
def test_isin(keys):
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
result = compute(s.x.isin(keys), b)
expected = np.in1d(b['x'], keys)
np.testing.assert_array_equal(result, expected)
def test_nunique_recarray():
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6),
('a', 1), ('b', 2)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
expr = s.nunique()
assert compute(expr, b) == len(np.unique(b))
def test_str_repeat():
a = np.array(('a', 'b', 'c'))
s = symbol('s', discover(a))
expr = s.repeat(3)
assert all(compute(expr, a) == np.char.multiply(a, 3))
def test_str_interp():
a = np.array(('%s', '%s', '%s'))
s = symbol('s', discover(a))
expr = s.interp(1)
assert all(compute(expr, a) == np.char.mod(a, 1))
def test_timedelta_arith():
dates = np.arange('2014-01-01', '2014-02-01', dtype='datetime64')
delta = np.timedelta64(1, 'D')
sym = symbol('s', discover(dates))
assert (compute(sym + delta, dates) == dates + delta).all()
assert (compute(sym - delta, dates) == dates - delta).all()
def test_coerce():
x = np.arange(1, 3)
s = symbol('s', discover(x))
np.testing.assert_array_equal(compute(s.coerce('float64'), x),
np.arange(1.0, 3.0))
def test_concat_arr():
s_data = np.arange(15)
t_data = np.arange(15, 30)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30)
).all()
def test_concat_mat():
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30).reshape(10, 3)
).all()
assert (
compute(concat(s, t, axis=1), {s: s_data, t: t_data}) ==
np.concatenate((s_data, t_data), axis=1)
).all()
| bsd-3-clause |
brianlorenz/COSMOS_IMACS_Redshifts | PlotCodes/Plotfits.py | 1 | 1041 | #Plot a .fits file
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii, fits
import sys, os, string
import pandas as pd
fitsfile = sys.argv[1]
data = fits.open(fitsfile)[0].data
head = fits.open(fitsfile)[0].header
d0 = data[0]
d1 = data[1]
d2 = data[2]
d3 = data[3]
d4 = data[4]
#d5 = data[5]
#d6 = data[6]
#d7 = data[7]
#d8 = data[8]
crval1 = head["crval1"]
crpix1 = head["crpix1"]
cdelt1 = head["cdelt1"]
naxis1 = head["naxis1"]
dcflag = head["dc-flag"]
exptime = head['exptime']
wavelength = (1.0+np.arange(naxis1)-crpix1)*cdelt1 + crval1
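# Hedged note on the linear wavelength solution above: for a 1-based pixel
# index p, lambda(p) = crval1 + (p - crpix1)*cdelt1. For example, with
# crval1=5000, cdelt1=2 and crpix1=1, pixel 1 maps to 5000 and pixel 101 to
# 5200 (in whatever wavelength units the header uses).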
fig,axarr = plt.subplots(figsize=(13,7))
#ax0,ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8 = axarr[0,0],axarr[0,1],axarr[0,2],axarr[1,0],axarr[1,1],axarr[1,2],axarr[2,0],axarr[2,1],axarr[2,2]
axarr.plot(wavelength,data[0])
#ax1.plot(wavelength,data[1])
#ax2.plot(wavelength,data[2])
#ax3.plot(wavelength,data[3])
#ax4.plot(wavelength,data[4])
#ax5.plot(wavelength,data[5])
#ax6.plot(wavelength,data[6])
#ax7.plot(wavelength,data[7])
#ax8.plot(wavelength,data[8])
plt.show()
| mit |
abhisg/scikit-learn | sklearn/tree/tree.py | 2 | 37683 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.validation import NotFittedError
from ..utils.multiclass import check_classification_targets
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # which indexing with [:, np.newaxis] would not do.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
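        # For example (illustrative): with self.n_features_ == 100,
        # max_features="sqrt" resolves to 10, "log2" to 6 (floor of log2(100)),
        # and a float of 0.25 resolves to max(1, int(0.25 * 100)) == 25.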
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort == True and issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms which
# desire presorting must do presorting themselves and pass that matrix
# into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
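# Note (hedged sketch): compute_feature_importances() accumulates, for every
# node that splits on a feature, the weighted impurity decrease
#     N_t / N * (impurity - N_t_R / N_t * right_impurity
#                         - N_t_L / N_t * left_impurity)
# and then normalizes the per-feature totals so that they sum to one.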
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
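# Editor's note: an illustrative sketch (not part of the original source) of
# the "balanced" class_weight computation described in the class docstring,
# i.e. n_samples / (n_classes * np.bincount(y)), using made-up labels:
#
#   import numpy as np
#   y = np.array([0, 0, 0, 1])
#   weights = float(len(y)) / (2 * np.bincount(y))   # -> array([ 0.67,  2. ])
#
# The rarer class (label 1) receives the larger weight.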
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking steps.
Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as a feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
presort=presort)
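# Editor's note: an illustrative sketch (not part of the original source) of
# how the float form of max_features documented above is resolved, assuming a
# hypothetical dataset with 10 features:
#
#   n_features = 10
#   max_features = 0.3
#   int(max_features * n_features)   # -> 3 features considered at each split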
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
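# Editor's note: a minimal sketch (not part of scikit-learn) of the split
# strategy described in the docstring above: for each of `max_features`
# randomly selected features, one threshold is drawn at random and the best of
# those candidate splits is kept.
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   def random_candidate_splits(X, feature_indices):
#       # one uniformly drawn threshold per candidate feature
#       return [(f, rng.uniform(X[:, f].min(), X[:, f].max()))
#               for f in feature_indices]
#
# The candidate (feature, threshold) pair with the best impurity reduction is
# then used for the node.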
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
drewejohnson/drewtils | drewtils/__init__.py | 1 | 1629 | # THIS FILE IS PROVIDED AS IS UNDER THE CONDITIONS DETAILED IN LICENSE
# COPYRIGHT ANDREW JOHNSON, 2017-2020
import operator
from drewtils.parsers import KeywordParser, PatternReader
__versions__ = '0.2.0'
def dfSubset(data, where):
"""
Return a subset of the data given a series of conditions
.. versionadded:: 0.1.9
Parameters
----------
data: :py:class:`pandas.DataFrame`:
DataFrame to view
where: str or list or tuple
Conditions to apply.
Notes
-----
If the argument is a string, it will be converted
to a tuple for iteration. Items in the iterable can be either a string
or a three-valued iterable of the following form::
string: 'column operand target'
iterable: ('column', 'operand', 'target')
If the first-level item is a string, it will be split at spaces.
Operands are string-representations of operators from the operator module,
e.g.::
'eq', 'ge', 'le', 'ne', 'gt', 'lt', 'contains'
Returns
-------
view: :py:class:`pandas.DataFrame`:
View into the data frame after successive slices
See Also
--------
:py:mod:`operator`
"""
view = data
if isinstance(where, str):
where = where,
for item in where:
if isinstance(item, str):
cond = item.split()
else:
cond = item
assert len(cond) == 3, ('Conditions should have three arguments, '
'not like {}'.format(item))
evalFunc = getattr(operator, cond[1])
view = view[evalFunc(view[cond[0]], cond[2])]
return view
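# Editor's note: an illustrative usage sketch (hypothetical data, not part of
# the original module) for dfSubset:
#
#   import pandas
#   df = pandas.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
#   dfSubset(df, 'b eq y')                     # rows where df['b'] == 'y'
#   dfSubset(df, [('a', 'ge', 2), 'b ne x'])   # successive conditions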
| mit |
dongjoon-hyun/spark | python/pyspark/sql/context.py | 15 | 23877 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, _NoValue
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.udf import UDFRegistration # noqa: F401
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
.. deprecated:: 3.0.0
Use :func:`SparkSession.builder.getOrCreate()` instead.
Parameters
----------
sparkContext : :class:`SparkContext`
The :class:`SparkContext` backing this SQLContext.
sparkSession : :class:`SparkSession`
The :class:`SparkSession` around which this SQLContext wraps.
jsqlContext : optional
An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
This is only for internal.
Examples
--------
>>> from datetime import datetime
>>> from pyspark.sql import Row
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + 1)=2, (d + 1)=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, 'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
_instantiatedContext = None
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
if sparkSession is None:
warnings.warn(
"Deprecated in 3.0.0. Use SparkSession.builder.getOrCreate() instead.",
FutureWarning
)
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession.builder.getOrCreate()
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if (SQLContext._instantiatedContext is None
or SQLContext._instantiatedContext._sc._jsc is None):
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@property
def _conf(self):
"""Accessor for the JVM SQL-specific configurations"""
return self.sparkSession._jsparkSession.sessionState().conf()
@classmethod
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
.. versionadded:: 1.6.0
.. deprecated:: 3.0.0
Use :func:`SparkSession.builder.getOrCreate()` instead.
Parameters
----------
sc : :class:`SparkContext`
"""
warnings.warn(
"Deprecated in 3.0.0. Use SparkSession.builder.getOrCreate() instead.",
FutureWarning
)
if (cls._instantiatedContext is None
or SQLContext._instantiatedContext._sc._jsc is None):
jsqlContext = sc._jvm.SparkSession.builder().sparkContext(
sc._jsc.sc()).getOrCreate().sqlContext()
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
.. versionadded:: 1.6.0
"""
return self.__class__(self._sc, self.sparkSession.newSession())
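# Editor's note: an illustrative sketch (not part of the original file) of the
# isolation described above; temporary views registered in one session are not
# visible from a session created via newSession():
#
#   sqlContext.registerDataFrameAsTable(df, "tmp_view")
#   otherContext = sqlContext.newSession()
#   "tmp_view" in otherContext.tableNames()   # expected: False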
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
.. versionadded:: 1.3.0
"""
self.sparkSession.conf.set(key, value)
def getConf(self, key, defaultValue=_NoValue):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is set, return
defaultValue. If the key is not set and defaultValue is not set, return
the system default value.
.. versionadded:: 1.3.0
Examples
--------
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", "10")
'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", "50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", "10")
'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
.. versionadded:: 1.3.1
Returns
-------
:class:`UDFRegistration`
"""
return self.sparkSession.udf
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
.. versionadded:: 1.4.0
Parameters
----------
start : int
the start value
end : int, optional
the end value (exclusive)
step : int, optional
the incremental step (default: 1)
numPartitions : int, optional
the number of partitions of the DataFrame
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
def registerFunction(self, name, f, returnType=None):
"""An alias for :func:`spark.udf.register`.
See :meth:`pyspark.sql.UDFRegistration.register`.
.. versionadded:: 1.2.0
.. deprecated:: 2.3.0
Use :func:`spark.udf.register` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.register instead.",
FutureWarning
)
return self.sparkSession.udf.register(name, f, returnType)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""An alias for :func:`spark.udf.registerJavaFunction`.
See :meth:`pyspark.sql.UDFRegistration.registerJavaFunction`.
.. versionadded:: 2.1.0
.. deprecated:: 2.3.0
Use :func:`spark.udf.registerJavaFunction` instead.
"""
warnings.warn(
"Deprecated in 2.3.0. Use spark.udf.registerJavaFunction instead.",
FutureWarning
)
return self.sparkSession.udf.registerJavaFunction(name, javaClassName, returnType)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
Parameters
----------
rdd : :class:`RDD`
an RDD of Row or tuple
samplingRatio : float, optional
sampling ratio, or no sampling (default)
Returns
-------
:class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
.. versionadded:: 1.3.0
.. versionchanged:: 2.0.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1.0
Added verifySchema.
Parameters
----------
data : :class:`RDD` or iterable
an RDD of any kind of SQL data representation (:class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
schema : :class:`pyspark.sql.types.DataType`, str or list, optional
a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
samplingRatio : float, optional
the sample ratio of rows used for inferring
verifySchema : bool, optional
verify data types of every row against schema. Enabled by default.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1='Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name='Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name='Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1='Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name='Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name='Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name='Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name='Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a='Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
.. versionadded:: 1.3.0
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
def dropTempTable(self, tableName):
""" Remove the temporary table from catalog.
.. versionadded:: 1.6.0
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
.. versionadded:: 1.3.0
Returns
-------
:class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
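# Editor's note: an illustrative usage sketch (hypothetical path and format,
# not part of the original file) for createExternalTable:
#
#   people = sqlContext.createExternalTable(
#       "people", path="/tmp/people.parquet", source="parquet")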
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
.. versionadded:: 1.0.0
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2='row1'), Row(f1=2, f2='row2'), Row(f1=3, f2='row3')]
"""
return self.sparkSession.sql(sqlQuery)
def table(self, tableName):
"""Returns the specified table or view as a :class:`DataFrame`.
.. versionadded:: 1.0.0
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
The returned DataFrame has two columns: ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
.. versionadded:: 1.3.0
Parameters
----------
dbName: str, optional
name of the database to use.
Returns
-------
:class:`DataFrame`
Examples
--------
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(namespace='', tableName='table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
.. versionadded:: 1.3.0
Parameters
----------
dbName: str
name of the database to use. Default to the current database.
Returns
-------
list
list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
.. versionadded:: 1.4.0
Returns
-------
:class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
Returns
-------
:class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. versionadded:: 2.0.0
Notes
-----
This API is evolving.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
.. deprecated:: 2.0.0
Use SparkSession.builder.enableHiveSupport().getOrCreate().
Parameters
----------
sparkContext : :class:`SparkContext`
The SparkContext to wrap.
jhiveContext : optional
An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
This is only for internal use.
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
FutureWarning
)
if jhiveContext is None:
sparkContext._conf.set("spark.sql.catalogImplementation", "hive")
sparkSession = SparkSession.builder._sparkContext(sparkContext).getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple derby instances and encountering incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
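# Editor's note: an illustrative sketch (hypothetical table name, not part of
# the original file): if the files backing a table change outside Spark SQL,
# refresh the cached metadata before querying it again:
#
#   hiveContext.refreshTable("my_table")
#   hiveContext.sql("SELECT COUNT(*) FROM my_table")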
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", "field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
hdmetor/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`RandomForestClassifier`, and `GaussianNB`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `GaussianNB` count 5 times as much as
those of the other classifiers when the averaged probability is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
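# Editor's addition (a sanity check, not in the original example): with soft
# voting, the VotingClassifier's probability is the weighted average of the
# individual classifiers' probabilities, here with weights 1, 1 and 5 for
# clf1 (lr), clf2 (rf) and clf3 (gnb) respectively.
manual_class1_1 = (1 * class1_1[0] + 1 * class1_1[1] + 5 * class1_1[2]) / 7.0
print('class 1, sample 1: %.4f (manual average) vs %.4f (VotingClassifier)' % (manual_class1_1, class1_1[3]))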
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'RandomForestClassifier\nweight 1',
'GaussianNB\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
BDI-pathogens/phyloscanner | tools/EstimateReadCountPerWindow.py | 1 | 14576 | #!/usr/bin/env python
from __future__ import print_function
## Author: Chris Wymant, [email protected]
## Acknowledgement: I wrote this while funded by ERC Advanced Grant PBDR-339251
##
## Overview:
ExplanatoryMessage = '''For each bam file in the list given as input, this
script does the following. The distribution of read lengths, and insert sizes if
reads are found to be paired, is calculated. (Here, length means length of the
mapping reference covered by the read, which will not be the same as the true
read length if there are insertions or deletions.) We then estimate the number
of reads and inserts expected to fully span a window of width W by assuming that
reads are distributed randomly over the genome (i.e. ignoring the actual
location information in the bam). We output this count for each bam file as a
function of W.'''
import os
import sys
import argparse
import pysam
import phyloscanner_funcs as pf
import collections
import numpy as np
# Define a function to check files exist, as a type for the argparse.
def File(MyFile):
if not os.path.isfile(MyFile):
raise argparse.ArgumentTypeError(MyFile+' does not exist or is not a file.')
return MyFile
# A class to have new lines in argument help text
class SmartFormatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
if text.startswith('R|'):
return text[2:].splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
# Set up the arguments for this script
parser = argparse.ArgumentParser(description=ExplanatoryMessage,
formatter_class=SmartFormatter)
# Positional args
parser.add_argument('BamAndRefList', type=File,
help='''R|A csv-format file listing the bam and reference files
(i.e. the fasta-format file containing the sequence to
which the reads were mapped). The first column should
be the bam file, the second column the corresponding
reference file, with a comma separating the two. An
optional third column, if present, will be used to
rename the bam files in all output. For example:
PatientA.bam,PatientA_ref.fasta,A
PatientB.bam,PatientB_ref.fasta,B''')
parser.add_argument('-N', '--normalise', action='store_true', help='''Normalise
the counts for each bam to the value at a window width of zero, making it easier
to compare the relative decline in number of reads with growing window size
between different bams with different total numbers of reads.''')
parser.add_argument('-O', '--out-filename', help="We'll append '.csv' for the "
"output data file, and '.pdf' for the plot. The default is "
"'EstimatedReadCountsPerWindow'.", default='EstimatedReadCountsPerWindow')
parser.add_argument('-OIS', '--overlapping-insert-sizes', action='store_true',
help='''Just record the insert size distribution for each bam, restricted to
inserts where the mates overlap.''')
parser.add_argument('-DB', '--dont-plot', action='store_true',
help="Don't plot the results.")
parser.add_argument('-MC', '--min-read-count', type=float, help='''Used to
specify a positive number: we'll truncate the x axis when the window width
becomes so large that all bams have a read count per window below this
value. The default is 1.''', default=1)
parser.add_argument('-AS', '--axis-font-size', type=int,
help='For the plot. The default is 15.', default=15)
parser.add_argument('-TS', '--title-font-size', type=int,
help='For the plot. The default is 15.', default=15)
parser.add_argument('-LS', '--legend-font-size', type=int,
help='For the plot. The default is 7.', default=7)
parser.add_argument('-LL', '--legend-location',
help='''For the plot. The default is 'lower left'. The other options are:
'best', 'upper right', 'upper left', 'lower right', 'right', 'center left',
'center right', 'lower center',' upper center', 'center' ''',
default='lower left')
parser.add_argument('-LY', '--linear-y-axis',
help='For the plot. The default is logarithmic.', action='store_true')
parser.add_argument('-XM', '--x-min-max', help='The minimum and maximum for '\
'the x axis in the plot, specified together as a comma-separated pair of '\
'numbers.')
parser.add_argument('-YM', '--y-min-max', help='The minimum and maximum for '\
'the y axis in the plot, specified together as a comma-separated pair of '\
'numbers.')
parser.add_argument('--x-samtools', default='samtools', help=\
'Used to specify the command required to run samtools, if it is needed to index'
' the bam files (by default: samtools).')
args = parser.parse_args()
InsertSizesOnly = args.overlapping_insert_sizes
def GetIntPair(arg, ArgName):
MinMax = arg.split(',')
if len(MinMax) != 2:
print(ArgName, 'should be used to specify a comma-separated pair of',
'numbers. Quitting.', file=sys.stderr)
exit(1)
try:
Min = float(MinMax[0])
Max = float(MinMax[1])
except ValueError:
print(ArgName, 'should be used to specify a comma-separated pair of',
'numbers. Quitting.', file=sys.stderr)
exit(1)
return min(Min, Max), max(Min, Max)
# Get plot limits
if args.x_min_max:
Xmin, Xmax = GetIntPair(args.x_min_max, '--x-min-max')
if args.y_min_max:
Ymin, Ymax = GetIntPair(args.y_min_max, '--y-min-max')
# Read in the input bam and ref files
BamFiles, RefFiles, aliases, BamFileBasenames = \
pf.ReadInputCSVfile(args.BamAndRefList)
NumBams = len(BamFiles)
# Make index files for the bam files if needed.
pf.MakeBamIndices(BamFiles, args.x_samtools)
def FindReadCountAsFuncOfWindowWidth(ReadSizeCountDict, RefLength):
# Return an empty array if there are no reads
if len(ReadSizeCountDict) == 0:
return np.zeros(0)
LargestReadLength = max(ReadSizeCountDict.keys())
RefLengthPlus1 = RefLength + 1
# The nth element of this list will eventually contain the number of reads
# expected to span a window of width n+1 (list is zero-based).
ReadsCountByWindowWidth = np.zeros(LargestReadLength)
for ReadLength, count in ReadSizeCountDict.items():
ReadLengthPlus1 = ReadLength + 1
# The number of positions at which we could place a window of width W is
# RefLength - W + 1
# The number of positions at which we could place a window of width W such
# that it is wholly inside a read is ReadLength - W + 1
# Probability of a given read overlapping a window of width W is therefore
# (ReadLength - W + 1) / (RefLength - W + 1)
for W in range(1, ReadLengthPlus1):
NumSpanningReads = count * \
float(ReadLengthPlus1 - W) / (RefLengthPlus1 - W)
ReadsCountByWindowWidth[W-1] += NumSpanningReads
if args.normalise:
ReadsCountByWindowWidth = [float(count) / ReadsCountByWindowWidth[0] \
for count in ReadsCountByWindowWidth]
return ReadsCountByWindowWidth
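# Editor's note (an illustrative check with made-up numbers, not part of the
# original script): following the reasoning in the comments above, a single
# read of length 250 on a 10,000 bp reference is expected to span a 150 bp
# window with probability (250 - 150 + 1) / (10000 - 150 + 1), roughly 0.01.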
ReadLengthCountsByBam = collections.OrderedDict()
InsertSizeCountsByBam = collections.OrderedDict()
InsertSizesOnlyByBam = collections.OrderedDict()
for i, BamFileName in enumerate(BamFiles):
alias = aliases[i]
print('Now counting read and insert sizes for', alias)
bam = pysam.AlignmentFile(BamFileName, "rb")
# Find the reference in the bam file; there should only be one.
AllRefs = bam.references
if len(AllRefs) != 1:
print('Expected exactly one reference in', BamFileName + '; found',\
str(len(AllRefs)) + '.Quitting.', file=sys.stderr)
exit(1)
RefName = AllRefs[0]
# Get the length of the reference.
AllRefLengths = bam.lengths
if len(AllRefLengths) != 1:
print('Pysam error: found one reference but', len(AllRefLengths),
'reference lengths. Quitting.', file=sys.stderr)
exit(1)
RefLength = AllRefLengths[0]
PairedReadCoords = {}
ReadLengthCounts = {}
InsertSizeCounts = {}
TotalReadCount = 0
# Iterate through the reads
for read in bam.fetch(RefName):
MappedPositions = read.get_reference_positions(full_length=False)
# Skip unmapped reads
if not MappedPositions:
continue
TotalReadCount += 1
start = min(MappedPositions[0], MappedPositions[-1])
end = max(MappedPositions[0], MappedPositions[-1])
ReadLength = end - start
try:
ReadLengthCounts[ReadLength] += 1
except KeyError:
ReadLengthCounts[ReadLength] = 1
# The first time we encounter a mate from a pair, record its start and end.
# When we encounter its mate, if they overlap, record the insert size; if
# they don't overlap, record their separate lengths as though they are two
# different inserts (because phyloscanner won't merge them - they are
# effectively two separate inserts from the point of view of merging).
if read.is_paired:
if read.query_name in PairedReadCoords:
MateStart, MateEnd, MateFoundBool = PairedReadCoords[read.query_name]
PairedReadCoords[read.query_name][2] = True
if start <= MateStart <= end:
InsertSize = max(end, MateEnd) - start
try:
InsertSizeCounts[InsertSize] += 1
except KeyError:
InsertSizeCounts[InsertSize] = 1
elif MateStart <= start <= MateEnd:
InsertSize = max(end, MateEnd) - MateStart
try:
InsertSizeCounts[InsertSize] += 1
except KeyError:
InsertSizeCounts[InsertSize] = 1
else:
try:
InsertSizeCounts[ReadLength] += 1
except KeyError:
InsertSizeCounts[ReadLength] = 1
MateLength = MateEnd - MateStart
try:
InsertSizeCounts[MateLength] += 1
except KeyError:
InsertSizeCounts[MateLength] = 1
else:
PairedReadCoords[read.query_name] = [start, end, False]
# For paired reads for which we didn't find a mate, add just the read length
# to the insert size distribution.
NumMissingMates = 0
for start, end, MateFound in PairedReadCoords.values():
if not MateFound:
NumMissingMates += 1
ReadLength = end - start
try:
InsertSizeCounts[ReadLength] += 1
except KeyError:
InsertSizeCounts[ReadLength] = 1
if NumMissingMates > 0:
print('Info:', NumMissingMates, 'of', TotalReadCount, 'reads in',
BamFileName, "are flagged as being paired but don't have a mate present.")
# Skip empty bams
if TotalReadCount == 0:
print('Warning: no reads found in', BamFileName + '. Skipping.')
continue
if InsertSizesOnly:
InsertSizesOnlyByBam[alias] = InsertSizeCounts
ReadLengthCountsByBam[alias] = \
FindReadCountAsFuncOfWindowWidth(ReadLengthCounts, RefLength)
InsertSizeCountsByBam[alias] = \
FindReadCountAsFuncOfWindowWidth(InsertSizeCounts, RefLength)
if InsertSizesOnly:
with open(args.out_filename + '.csv', 'w') as f:
f.write('Bam file,Size of overlapping read pair or length of read in ' + \
'non-overlapping pair,Count\n')
for alias, InsertSizesOnly in InsertSizesOnlyByBam.items():
for size, count in sorted(InsertSizesOnly.items(), key=lambda x:x[0]):
f.write(alias + ',' + str(size) + ',' + str(count) + '\n')
exit(0)
# Make a matrix for which the first column is every window size we need to
# consider, in order, and subsequent columns list the number of reads (and
# inserts, if reads are paired) expected to fully span a window of that size,
# for each different bam.
MaxInsertSize = max(len(list_) for list_ in InsertSizeCountsByBam.values())
SomeDataIsPaired = MaxInsertSize > 0
MaxReadOrInsertSize = max(MaxInsertSize,
max(len(list_) for list_ in ReadLengthCountsByBam.values()))
if SomeDataIsPaired:
matrix = np.zeros((MaxReadOrInsertSize, 2 * NumBams + 1))
else:
matrix = np.zeros((MaxReadOrInsertSize, NumBams + 1))
matrix[:, 0] = np.arange(1, MaxReadOrInsertSize + 1)
header = 'window width'
if SomeDataIsPaired:
for alias in aliases:
header += ',' + 'read count in ' + alias + ',insert size count in ' + alias
for i, ReadLengthCounts in enumerate(ReadLengthCountsByBam.values()):
matrix[:len(ReadLengthCounts), 2 * i + 1] = ReadLengthCounts
for i, InsertSizeCounts in enumerate(InsertSizeCountsByBam.values()):
matrix[:len(InsertSizeCounts), 2 * i + 2] = InsertSizeCounts
else:
for alias in aliases:
header += ',' + 'read count in ' + alias
for i, ReadLengthCounts in enumerate(ReadLengthCountsByBam.values()):
matrix[:len(ReadLengthCounts), i + 1] = ReadLengthCounts
# Write the matrix to a csv file.
with open(args.out_filename + '.csv', 'w') as f:
np.savetxt(f, matrix, delimiter=',', header=header, fmt='%.1f')
if args.dont_plot:
exit(0)
try:
import matplotlib.pyplot as plt
except ImportError:
print("The python library matplotlib does not seem to be installed: you'll "
"need to plot", args.out_filename + '.csv yourself.' )
exit(1)
# For plotting: cut off the tail end of the matrix where read counts are too
# small.
LastDesiredRow = 0
for row in range(MaxReadOrInsertSize - 1, -1, -1):
if max(matrix[row, 1:]) >= args.min_read_count:
LastDesiredRow = row
break
if LastDesiredRow == 0:
print('Warning: no bam has', args.min_read_count, 'reads per window',
'regardless of how small the window is. Ignoring the --min-read-count value.')
LastDesiredRow = MaxReadOrInsertSize - 1
matrix = matrix[:LastDesiredRow + 1, :]
ax = plt.figure().add_subplot(111)
if args.x_min_max:
ax.set_xlim(xmin=Xmin, xmax=Xmax)
if args.y_min_max:
ax.set_ylim(ymin=Ymin, ymax=Ymax)
for i in range(1, matrix.shape[1]):
if SomeDataIsPaired:
alias = aliases[(i - 1) // 2]
if i % 2 == 0:
label = 'read pairs, ' + alias
linestyle = '--'
else:
label = 'reads, ' + alias
linestyle = '-'
else:
label = aliases[i - 1]
linestyle = '-'
plt.plot(matrix[:, 0], matrix[:, i], label=label, linestyle=linestyle)
plt.xlabel('window width', fontsize=args.axis_font_size)
YaxisLabel = 'number of reads'
if args.normalise:
YaxisLabel += ' relative to\nthe value at a window width of zero'
if SomeDataIsPaired:
title = \
'Estimating the number of unpaired reads and paired reads (merging\n' + \
'read in a pair when they overlap) spanning each window, assuming\n' + \
'reads are randomly distributed over the whole genome'
else:
title = \
'Estimating the number of reads spanning each window, assuming\n' + \
'they are randomly distributed over the whole genome'
plt.ylabel(YaxisLabel, fontsize=args.axis_font_size)
plt.title(title, fontsize=args.title_font_size)
ax.tick_params(axis='both', which='major', labelsize=args.axis_font_size)
if not args.linear_y_axis:
ax.set_yscale('log')
ax.set_xlim(xmin=0, xmax=LastDesiredRow)
plt.legend(loc=args.legend_location, fontsize=args.legend_font_size)
plt.tight_layout()
plt.savefig(args.out_filename + '.pdf')
| gpl-3.0 |
jayflo/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
h2oai/h2o-3 | h2o-py/h2o/automl/_h2o_automl_output.py | 2 | 1913 | import h2o
from h2o.automl._base import H2OAutoMLBaseMixin
from h2o.base import Keyed
class H2OAutoMLOutput(H2OAutoMLBaseMixin, Keyed):
"""
AutoML Output object containing the results of AutoML
"""
def __init__(self, state):
self._project_name = state['project_name']
self._key = state['json']['automl_id']['name']
self._leader = state['leader']
self._leaderboard = state['leaderboard']
self._event_log = el = state['event_log']
self._training_info = {r[0]: r[1]
for r in el[el['name'] != '', ['name', 'value']]
.as_data_frame(use_pandas=False, header=False)
}
def __getitem__(self, item):
if (
hasattr(self, item) and
# only allow public properties to be retrieved through the dictionary interface
hasattr(self.__class__, item) and
isinstance(getattr(self.__class__, item), property)
):
return getattr(self, item)
raise KeyError(item)
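# Editor's note (illustrative, not part of the original file): this makes
# dictionary-style access equivalent to attribute access, but for the public
# properties only, e.g.
#
#   output["leader"] is output.leader   # expected: True
#   output["_leader"]                   # expected: raises KeyError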
@property
def project_name(self):
return self._project_name
@property
def leader(self):
return self._leader
@property
def leaderboard(self):
return self._leaderboard
@property
def training_info(self):
return self._training_info
@property
def event_log(self):
return self._event_log
#-------------------------------------------------------------------------------------------------------------------
# Overrides
#-------------------------------------------------------------------------------------------------------------------
@property
def key(self):
return self._key
def detach(self):
self._project_name = None
h2o.remove(self.leaderboard)
h2o.remove(self.event_log)
| apache-2.0 |
ryanbressler/ClassWar | sklrf.py | 6 | 1280 | import sys
from sklearn.datasets import load_svmlight_file
from sklearn.ensemble import RandomForestClassifier
from time import time
import numpy as np
def dumptree(atree, fn):
from sklearn import tree
f = open(fn,"w")
tree.export_graphviz(atree,out_file=f)
f.close()
# def main():
fn = sys.argv[1]
X,Y = load_svmlight_file(fn)
rf_parameters = {
"n_estimators": 500,
"n_jobs": 1
}
clf = RandomForestClassifier(**rf_parameters)
X = X.toarray()
print clf
print "Starting Training"
t0 = time()
clf.fit(X, Y)
train_time = time() - t0
print "Training on %s took %s"%(fn, train_time)
print "Total training time (seconds): %s"%(train_time)
if len(sys.argv) == 2:
score = clf.score(X, Y)
count = np.sum(clf.predict(X)==Y)
print "Score: %s, %s / %s "%(score, count, len(Y))
else:
fn = sys.argv[2]
X,Y = load_svmlight_file(fn)
X = X.toarray()
score = clf.score(X, Y)
count = np.sum(clf.predict(X)==Y)
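# Editor's note: c1 and c0 below count correct predictions on the positive and
# negative class respectively; averaging the two per-class accuracies gives a
# balanced accuracy, and the "Error" printed further down is one minus that
# average.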
c1 = np.sum(clf.predict(X[Y==1])==Y[Y==1] )
c0 = np.sum(clf.predict(X[Y==0])==Y[Y==0] )
l = len(Y)
print "Error: %s"%(1-(float(c1)/float(sum(Y==1))+float(c0)/float(sum(Y==0)))/2.0)
print "Testing Score: %s, %s / %s, %s, %s, %s "%(score, count, l, c1, c0, (float(c1)/float(sum(Y==1))+float(c0)/float(sum(Y==0)))/2.0)
# if __name__ == '__main__':
# main()
| bsd-3-clause |
bzero/statsmodels | statsmodels/sandbox/nonparametric/dgp_examples.py | 37 | 6008 | # -*- coding: utf-8 -*-
"""Examples of non-linear functions for non-parametric regression
Created on Sat Jan 05 20:21:22 2013
Author: Josef Perktold
"""
import numpy as np
## Functions
def fg1(x):
'''Fan and Gijbels example function 1
'''
return x + 2 * np.exp(-16 * x**2)
def fg1eu(x):
'''Eubank similar to Fan and Gijbels example function 1
'''
return x + 0.5 * np.exp(-50 * (x - 0.5)**2)
def fg2(x):
'''Fan and Gijbels example function 2
'''
return np.sin(2 * x) + 2 * np.exp(-16 * x**2)
def func1(x):
'''made up example with sin, square
'''
return np.sin(x * 5) / x + 2. * x - 1. * x**2
## Classes with Data Generating Processes
doc = {'description':
'''Base Class for Univariate non-linear example
Does not work on its own.
Needs at least self.func to be defined in addition.
''',
'ref': ''}
class _UnivariateFunction(object):
#Base Class for Univariate non-linear example.
#Does not work on its own. Needs at least self.func to be defined in addition.
__doc__ = '''%(description)s
Parameters
----------
nobs : int
number of observations to simulate
x : None or 1d array
If x is given then it is used for the exogenous variable instead of
creating a random sample
distr_x : None or distribution instance
Only used if x is None. The rvs method is used to create a random
sample of the exogenous (explanatory) variable.
distr_noise : None or distribution instance
The rvs method is used to create a random sample of the errors.
Attributes
----------
x : ndarray, 1-D
exogenous or explanatory variable. x is sorted.
y : ndarray, 1-D
endogenous or response variable
y_true : ndarray, 1-D
expected values of endogenous or response variable, i.e. values of y
without noise
func : callable
underlying function (defined by subclass)
%(ref)s
''' #% doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
if x is None:
if distr_x is None:
x = np.random.normal(loc=0, scale=self.s_x, size=nobs)
else:
x = distr_x.rvs(size=nobs)
x.sort()
self.x = x
if distr_noise is None:
noise = np.random.normal(loc=0, scale=self.s_noise, size=nobs)
else:
noise = distr_noise.rvs(size=nobs)
if hasattr(self, 'het_scale'):
noise *= self.het_scale(self.x)
#self.func = fg1
self.y_true = y_true = self.func(x)
self.y = y_true + noise
def plot(self, scatter=True, ax=None):
'''plot the mean function and optionally the scatter of the sample
Parameters
----------
scatter: bool
If true, then add scatterpoints of sample to plot.
ax : None or matplotlib axis instance
If None, then a matplotlib.pyplot figure is created, otherwise
the given axis, ax, is used.
Returns
-------
fig : matplotlib figure
This is either the created figure instance or the one associated
with ax if ax is given.
'''
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
if scatter:
ax.plot(self.x, self.y, 'o', alpha=0.5)
xx = np.linspace(self.x.min(), self.x.max(), 100)
ax.plot(xx, self.func(xx), lw=2, color='b', label='dgp mean')
return ax.figure
doc = {'description':
'''Fan and Gijbels example function 1
linear trend plus a hump
''',
'ref':
'''
References
----------
Fan, Jianqing, and Irene Gijbels. 1992. "Variable Bandwidth and Local
Linear Regression Smoothers."
The Annals of Statistics 20 (4) (December): 2008-2036. doi:10.2307/2242378.
'''}
class UnivariateFanGijbels1(_UnivariateFunction):
__doc__ = _UnivariateFunction.__doc__ % doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
self.s_x = 1.
self.s_noise = 0.7
self.func = fg1
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
doc['description'] =\
'''Fan and Gijbels example function 2
sin plus a hump
'''
class UnivariateFanGijbels2(_UnivariateFunction):
__doc__ = _UnivariateFunction.__doc__ % doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
self.s_x = 1.
self.s_noise = 0.5
self.func = fg2
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
class UnivariateFanGijbels1EU(_UnivariateFunction):
'''
Eubank p.179f
'''
def __init__(self, nobs=50, x=None, distr_x=None, distr_noise=None):
if distr_x is None:
from scipy import stats
distr_x = stats.uniform
self.s_noise = 0.15
self.func = fg1eu
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
class UnivariateFunc1(_UnivariateFunction):
'''
made up, with sin and quadratic trend
'''
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
if x is None and distr_x is None:
from scipy import stats
distr_x = stats.uniform(-2, 4)
else:
nobs = x.shape[0]
self.s_noise = 2.
self.func = func1
super(UnivariateFunc1, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
def het_scale(self, x):
return np.sqrt(np.abs(3+x))
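# Editor's note: an illustrative usage sketch (not part of the original module)
# for the data-generating classes above:
#
#   import numpy as np
#   np.random.seed(0)
#   dgp = UnivariateFanGijbels1(nobs=200)
#   fig = dgp.plot()   # scatter of (x, y) plus the true mean function
#   dgp.y_true         # noise-free values of the response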
| bsd-3-clause |
llhe/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
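# Illustrative sketch (editorial addition, not part of the original module):
# building an input_fn from a small, made-up DataFrame.  Requires pandas
# (HAS_PANDAS is True); the returned input_fn would normally be handed to an
# Estimator's train/evaluate call.
def _example_pandas_input_fn():
    x = pd.DataFrame({'age': [21., 35., 48.], 'height': [1.60, 1.75, 1.82]})
    y = pd.Series([0, 1, 1])
    input_fn = pandas_input_fn(x=x, y=y, batch_size=2, num_epochs=1,
                               shuffle=False)
    return input_fn  # e.g. estimator.train(input_fn=input_fn)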
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
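# Illustrative sketch (editorial addition, not part of the original module):
# what the two extraction helpers return for a tiny, made-up DataFrame.
def _example_extract_helpers():
    features = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5]})
    labels = pd.DataFrame({'target': [0, 1]})
    x = extract_pandas_data(features)    # float ndarray of shape (2, 2)
    y = extract_pandas_labels(labels)    # ndarray of shape (2, 1)
    return x, y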
| apache-2.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/matplotlib/backends/backend_gtk3cairo.py | 21 | 2321 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from . import backend_gtk3
from . import backend_cairo
from .backend_cairo import cairo, HAS_CAIRO_CFFI
from matplotlib.figure import Figure
class RendererGTK3Cairo(backend_cairo.RendererCairo):
def set_context(self, ctx):
if HAS_CAIRO_CFFI:
ctx = cairo.Context._from_pointer(
cairo.ffi.cast(
'cairo_t **',
id(ctx) + object.__basicsize__)[0],
incref=True)
self.gc.ctx = ctx
class FigureCanvasGTK3Cairo(backend_gtk3.FigureCanvasGTK3,
backend_cairo.FigureCanvasCairo):
def __init__(self, figure):
backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
def _renderer_init(self):
"""use cairo renderer"""
self._renderer = RendererGTK3Cairo(self.figure.dpi)
def _render_figure(self, width, height):
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def on_draw_event(self, widget, ctx):
""" GtkDrawable draw event, like expose_event in GTK 2.X
"""
        # The _need_redraw flag doesn't work: it sometimes prevents
        # the rendering and leaves the canvas blank.
#if self._need_redraw:
self._renderer.set_context(ctx)
allocation = self.get_allocation()
x, y, w, h = allocation.x, allocation.y, allocation.width, allocation.height
self._render_figure(w, h)
#self._need_redraw = False
return False # finish event propagation?
class FigureManagerGTK3Cairo(backend_gtk3.FigureManagerGTK3):
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTK3Cairo(figure)
manager = FigureManagerGTK3Cairo(canvas, num)
return manager
FigureCanvas = FigureCanvasGTK3Cairo
FigureManager = FigureManagerGTK3Cairo
show = backend_gtk3.show
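# Illustrative sketch (editorial addition, not part of the original module):
# this backend is normally selected by name before pyplot is imported; it
# requires a GTK3 runtime plus cairo (or cairocffi) to be installed.
def _example_select_gtk3cairo():
    import matplotlib
    matplotlib.use('GTK3Cairo')          # must happen before importing pyplot
    import matplotlib.pyplot as plt
    plt.plot([0, 1], [0, 1])
    plt.show()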
| mit |
pythonvietnam/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
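# (Editorial addition, not part of the original example.)  The fitted objects
# expose the selected shrinkage amounts directly, which makes the comparison
# between the three approaches concrete.
print("CV-chosen shrinkage:   %.3f" % cv.best_estimator_.shrinkage)
print("Ledoit-Wolf shrinkage: %.3f" % lw.shrinkage_)
print("OAS shrinkage:         %.3f" % oa.shrinkage_)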
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
chintak/scikit-image | doc/examples/plot_hog.py | 2 | 4351 | """
===============================
Histogram of Oriented Gradients
===============================
The `Histogram of Oriented Gradient
<http://en.wikipedia.org/wiki/Histogram_of_oriented_gradients>`__ (HOG) feature
descriptor [1]_ is popular for object detection.
In the following example, we compute the HOG descriptor and display
a visualisation.
Algorithm overview
------------------
Compute a Histogram of Oriented Gradients (HOG) by
1. (optional) global image normalisation
2. computing the gradient image in x and y
3. computing gradient histograms
4. normalising across blocks
5. flattening into a feature vector
The first stage applies an optional global image normalisation
equalisation that is designed to reduce the influence of illumination
effects. In practice we use gamma (power law) compression, either
computing the square root or the log of each colour channel.
Image texture strength is typically proportional to the local surface
illumination so this compression helps to reduce the effects of local
shadowing and illumination variations.
The second stage computes first order image gradients. These capture
contour, silhouette and some texture information, while providing
further resistance to illumination variations. The locally dominant
colour channel is used, which provides colour invariance to a large
extent. Variant methods may also include second order image derivatives,
which act as primitive bar detectors - a useful feature for capturing,
e.g. bar like structures in bicycles and limbs in humans.
The third stage aims to produce an encoding that is sensitive to
local image content while remaining resistant to small changes in
pose or appearance. The adopted method pools gradient orientation
information locally in the same way as the SIFT [2]_
feature. The image window is divided into small spatial regions,
called "cells". For each cell we accumulate a local 1-D histogram
of gradient or edge orientations over all the pixels in the
cell. This combined cell-level 1-D histogram forms the basic
"orientation histogram" representation. Each orientation histogram
divides the gradient angle range into a fixed number of
predetermined bins. The gradient magnitudes of the pixels in the
cell are used to vote into the orientation histogram.
The fourth stage computes normalisation, which takes local groups of
cells and contrast normalises their overall responses before passing
to next stage. Normalisation introduces better invariance to illumination,
shadowing, and edge contrast. It is performed by accumulating a measure
of local histogram "energy" over local groups of cells that we call
"blocks". The result is used to normalise each cell in the block.
Typically each individual cell is shared between several blocks, but
its normalisations are block dependent and thus different. The cell
thus appears several times in the final output vector with different
normalisations. This may seem redundant but it improves the performance.
We refer to the normalised block descriptors as Histogram of Oriented
Gradient (HOG) descriptors.
The final step collects the HOG descriptors from all blocks of a dense
overlapping grid of blocks covering the detection window into a combined
feature vector for use in the window classifier.
References
----------
.. [1] Dalal, N. and Triggs, B., "Histograms of Oriented Gradients for
Human Detection," IEEE Computer Society Conference on Computer
Vision and Pattern Recognition, 2005, San Diego, CA, USA.
.. [2] David G. Lowe, "Distinctive image features from scale-invariant
keypoints," International Journal of Computer Vision, 60, 2 (2004),
pp. 91-110.
"""
import matplotlib.pyplot as plt
from skimage.feature import hog
from skimage import data, color, exposure
image = color.rgb2gray(data.lena())
fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualise=True)
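# (Editorial addition, not part of the original example.)  The flattened
# descriptor length is (cells per row) * (cells per column) * orientations;
# for the 512x512 input and 16x16-pixel cells used here it should be
# 32 * 32 * 8 = 8192.
print("HOG feature vector length: %d" % fd.size)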
plt.figure(figsize=(8, 4))
plt.subplot(121).set_axis_off()
plt.imshow(image, cmap=plt.cm.gray)
plt.title('Input image')
# Rescale histogram for better display
hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
plt.subplot(122).set_axis_off()
plt.imshow(hog_image_rescaled, cmap=plt.cm.gray)
plt.title('Histogram of Oriented Gradients')
plt.show()
| bsd-3-clause |
BoltzmannBrain/nupic.research | projects/sequence_prediction/reberGrammar/reberSequence_CompareTMvsLSTM.py | 13 | 2320 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
plt.ion()
rcParams.update({'figure.autolayout': True})
def plotResult():
resultTM = np.load('result/reberSequenceTM.npz')
resultLSTM = np.load('result/reberSequenceLSTM.npz')
plt.figure()
plt.hold(True)
plt.subplot(2,2,1)
plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['correctRateAll'],1),'-*',label='TM')
plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['correctRateAll'],1),'-s',label='LSTM')
plt.legend()
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Hit Rate (Best Match) (%)')
plt.subplot(2,2,4)
plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['missRateAll'],1),'-*',label='TM')
plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['missRateAll'],1),'-*',label='LSTM')
plt.legend()
plt.xlabel(' Training Sequence Number')
plt.ylabel(' Miss Rate (%)')
plt.subplot(2,2,3)
plt.semilogx(resultTM['trainSeqN'], 100*np.mean(resultTM['fpRateAll'],1),'-*',label='TM')
plt.semilogx(resultLSTM['trainSeqN'], 100*np.mean(resultLSTM['fpRateAll'],1),'-*',label='LSTM')
plt.legend()
plt.xlabel(' Training Sequence Number')
plt.ylabel(' False Positive Rate (%)')
plt.savefig('result/ReberSequence_CompareTM&LSTMperformance.pdf')
if __name__ == "__main__":
plotResult()
| agpl-3.0 |
manipopopo/tensorflow | tensorflow/contrib/eager/python/examples/rnn_colorbot/rnn_colorbot.py | 14 | 13765 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""TensorFlow Eager Execution Example: RNN Colorbot.
This example builds, trains, and evaluates a multi-layer RNN that can be
run with eager execution enabled. The RNN is trained to map color names to
their RGB values: it takes as input a one-hot encoded character sequence and
outputs a three-tuple (R, G, B) (scaled by 1/255).
For example, say we'd like the RNN Colorbot to generate the RGB values for the
color white. To represent our query in a form that the Colorbot could
understand, we would create a sequence of five 256-long vectors encoding the
ASCII values of the characters in "white". The first vector in our sequence
would be 0 everywhere except for the ord("w")-th position, where it would be
1, the second vector would be 0 everywhere except for the
ord("h")-th position, where it would be 1, and similarly for the remaining three
vectors. We refer to such indicator vectors as "one-hot encodings" of
characters. After consuming these vectors, a well-trained Colorbot would output
the three tuple (1, 1, 1), since the RGB values for white are (255, 255, 255).
We are of course free to ask the colorbot to generate colors for any string we'd
like, such as "steel gray," "tensorflow orange," or "green apple," though
your mileage may vary as your queries increase in creativity.
This example shows how to:
1. read, process, (one-hot) encode, and pad text data via the
Datasets API;
2. build a trainable model;
3. implement a multi-layer RNN using Python control flow
constructs (e.g., a for loop);
4. train a model using an iterative gradient-based method.
The data used in this example is licensed under the Creative Commons
Attribution-ShareAlike License and is available at
https://en.wikipedia.org/wiki/List_of_colors:_A-F
https://en.wikipedia.org/wiki/List_of_colors:_G-M
https://en.wikipedia.org/wiki/List_of_colors:_N-Z
This example was adapted from
https://github.com/random-forests/tensorflow-workshop/tree/master/extras/colorbot
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
import urllib
import six
import tensorflow as tf
from tensorflow.contrib.eager.python import tfe
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
layers = tf.keras.layers
def parse(line):
"""Parse a line from the colors dataset."""
# Each line of the dataset is comma-separated and formatted as
# color_name, r, g, b
# so `items` is a list [color_name, r, g, b].
items = tf.string_split([line], ",").values
rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.
# Represent the color name as a one-hot encoded character sequence.
color_name = items[0]
chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)
# The sequence length is needed by our RNN.
length = tf.cast(tf.shape(chars)[0], dtype=tf.int64)
return rgb, chars, length
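# Illustrative sketch (editorial addition, not part of the original example):
# what `parse` yields for one made-up CSV record when eager execution is
# enabled (as it is when this script is run through tfe.run below).
def _example_parse():
    rgb, chars, length = parse("cerulean,0,123,167")
    # rgb:    float32 tensor of shape [3], values scaled into [0, 1]
    # chars:  float32 tensor of shape [8, 256], one one-hot row per character
    # length: int64 scalar tensor equal to 8 (the length of "cerulean")
    return rgb, chars, length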
def maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not tf.gfile.Exists(work_directory):
tf.gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not tf.gfile.Exists(filepath):
temp_file_name, _ = urllib.request.urlretrieve(source_url)
tf.gfile.Copy(temp_file_name, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print("Successfully downloaded", filename, size, "bytes.")
return filepath
def load_dataset(data_dir, url, batch_size):
"""Loads the colors data at path into a PaddedDataset."""
# Downloads data at url into data_dir/basename(url). The dataset has a header
# row (color_name, r, g, b) followed by comma-separated lines.
path = maybe_download(os.path.basename(url), data_dir, url)
# This chain of commands loads our data by:
# 1. skipping the header; (.skip(1))
# 2. parsing the subsequent lines; (.map(parse))
# 3. shuffling the data; (.shuffle(...))
# 3. grouping the data into padded batches (.padded_batch(...)).
dataset = tf.data.TextLineDataset(path).skip(1).map(parse).shuffle(
buffer_size=10000).padded_batch(
batch_size, padded_shapes=([None], [None, None], []))
return dataset
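# Illustrative sketch (editorial addition, not part of the original example):
# pulling one padded batch out of the dataset.  The CSV is downloaded on first
# use; the directory below mirrors the --dir default defined further down.
def _example_load_dataset(data_dir="/tmp/rnn_colorbot/data"):
    dataset = load_dataset(data_dir, SOURCE_TRAIN_URL, batch_size=4)
    for labels, chars, lengths in tfe.Iterator(dataset):
        # labels: [4, 3] RGB targets; chars: [4, max_len, 256]; lengths: [4]
        return labels, chars, lengths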
# pylint: disable=not-callable
class RNNColorbot(tf.keras.Model):
"""Multi-layer (LSTM) RNN that regresses on real-valued vector labels.
"""
def __init__(self, rnn_cell_sizes, label_dimension, keep_prob):
"""Constructs an RNNColorbot.
Args:
rnn_cell_sizes: list of integers denoting the size of each LSTM cell in
the RNN; rnn_cell_sizes[i] is the size of the i-th layer cell
label_dimension: the length of the labels on which to regress
keep_prob: (1 - dropout probability); dropout is applied to the outputs of
each LSTM layer
"""
super(RNNColorbot, self).__init__(name="")
self.label_dimension = label_dimension
self.keep_prob = keep_prob
self.cells = tf.contrib.checkpoint.List(
[tf.nn.rnn_cell.BasicLSTMCell(size) for size in rnn_cell_sizes])
self.relu = layers.Dense(
label_dimension, activation=tf.nn.relu, name="relu")
def call(self, inputs, training=False):
"""Implements the RNN logic and prediction generation.
Args:
inputs: A tuple (chars, sequence_length), where chars is a batch of
one-hot encoded color names represented as a Tensor with dimensions
[batch_size, time_steps, 256] and sequence_length holds the length
of each character sequence (color name) as a Tensor with dimension
[batch_size].
training: whether the invocation is happening during training
Returns:
A tensor of dimension [batch_size, label_dimension] that is produced by
passing chars through a multi-layer RNN and applying a ReLU to the final
hidden state.
"""
(chars, sequence_length) = inputs
# Transpose the first and second dimensions so that chars is of shape
# [time_steps, batch_size, dimension].
chars = tf.transpose(chars, [1, 0, 2])
# The outer loop cycles through the layers of the RNN; the inner loop
# executes the time steps for a particular layer.
batch_size = int(chars.shape[1])
for l in range(len(self.cells)):
cell = self.cells[l]
outputs = []
state = cell.zero_state(batch_size, tf.float32)
# Unstack the inputs to obtain a list of batches, one for each time step.
chars = tf.unstack(chars, axis=0)
for ch in chars:
output, state = cell(ch, state)
outputs.append(output)
# The outputs of this layer are the inputs of the subsequent layer.
chars = tf.stack(outputs, axis=0)
if training:
chars = tf.nn.dropout(chars, self.keep_prob)
# Extract the correct output (i.e., hidden state) for each example. All the
# character sequences in this batch were padded to the same fixed length so
# that they could be easily fed through the above RNN loop. The
# `sequence_length` vector tells us the true lengths of the character
# sequences, letting us obtain for each sequence the hidden state that was
# generated by its non-padding characters.
batch_range = [i for i in range(batch_size)]
indices = tf.stack([sequence_length - 1, batch_range], axis=1)
hidden_states = tf.gather_nd(chars, indices)
return self.relu(hidden_states)
def loss(labels, predictions):
"""Computes mean squared loss."""
return tf.reduce_mean(tf.square(predictions - labels))
def test(model, eval_data):
"""Computes the average loss on eval_data, which should be a Dataset."""
avg_loss = tfe.metrics.Mean("loss")
for (labels, chars, sequence_length) in tfe.Iterator(eval_data):
predictions = model((chars, sequence_length), training=False)
avg_loss(loss(labels, predictions))
print("eval/loss: %.6f\n" % avg_loss.result())
with tf.contrib.summary.always_record_summaries():
tf.contrib.summary.scalar("loss", avg_loss.result())
def train_one_epoch(model, optimizer, train_data, log_interval=10):
"""Trains model on train_data using optimizer."""
tf.train.get_or_create_global_step()
def model_loss(labels, chars, sequence_length):
predictions = model((chars, sequence_length), training=True)
loss_value = loss(labels, predictions)
tf.contrib.summary.scalar("loss", loss_value)
return loss_value
for (batch, (labels, chars, sequence_length)) in enumerate(
tfe.Iterator(train_data)):
with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
batch_model_loss = functools.partial(model_loss, labels, chars,
sequence_length)
optimizer.minimize(
batch_model_loss, global_step=tf.train.get_global_step())
if log_interval and batch % log_interval == 0:
print("train/batch #%d\tloss: %.6f" % (batch, batch_model_loss()))
SOURCE_TRAIN_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv"
SOURCE_TEST_URL = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv"
def main(_):
data_dir = os.path.join(FLAGS.dir, "data")
train_data = load_dataset(
data_dir=data_dir, url=SOURCE_TRAIN_URL, batch_size=FLAGS.batch_size)
eval_data = load_dataset(
data_dir=data_dir, url=SOURCE_TEST_URL, batch_size=FLAGS.batch_size)
model = RNNColorbot(
rnn_cell_sizes=FLAGS.rnn_cell_sizes,
label_dimension=3,
keep_prob=FLAGS.keep_probability)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
if FLAGS.no_gpu or tfe.num_gpus() <= 0:
print(tfe.num_gpus())
device = "/cpu:0"
else:
device = "/gpu:0"
print("Using device %s." % device)
log_dir = os.path.join(FLAGS.dir, "summaries")
tf.gfile.MakeDirs(log_dir)
train_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "train"), flush_millis=10000)
test_summary_writer = tf.contrib.summary.create_file_writer(
os.path.join(log_dir, "eval"), flush_millis=10000, name="eval")
with tf.device(device):
for epoch in range(FLAGS.num_epochs):
start = time.time()
with train_summary_writer.as_default():
train_one_epoch(model, optimizer, train_data, FLAGS.log_interval)
end = time.time()
print("train/time for epoch #%d: %.2f" % (epoch, end - start))
with test_summary_writer.as_default():
test(model, eval_data)
print("Colorbot is ready to generate colors!")
while True:
try:
color_name = six.moves.input(
"Give me a color name (or press enter to exit): ")
except EOFError:
return
if not color_name:
return
_, chars, length = parse(color_name)
with tf.device(device):
(chars, length) = (tf.identity(chars), tf.identity(length))
chars = tf.expand_dims(chars, 0)
length = tf.expand_dims(length, 0)
preds = tf.unstack(model((chars, length), training=False)[0])
# Predictions cannot be negative, as they are generated by a ReLU layer;
# they may, however, be greater than 1.
clipped_preds = tuple(min(float(p), 1.0) for p in preds)
rgb = tuple(int(p * 255) for p in clipped_preds)
print("rgb:", rgb)
data = [[clipped_preds]]
if HAS_MATPLOTLIB:
plt.imshow(data)
plt.title(color_name)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dir",
type=str,
default="/tmp/rnn_colorbot/",
help="Directory to download data files and save logs.")
parser.add_argument(
"--log_interval",
type=int,
default=10,
metavar="N",
help="Log training loss every log_interval batches.")
parser.add_argument(
"--num_epochs", type=int, default=20, help="Number of epochs to train.")
parser.add_argument(
"--rnn_cell_sizes",
type=int,
nargs="+",
default=[256, 128],
help="List of sizes for each layer of the RNN.")
parser.add_argument(
"--batch_size",
type=int,
default=64,
help="Batch size for training and eval.")
parser.add_argument(
"--keep_probability",
type=float,
default=0.5,
help="Keep probability for dropout between layers.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.01,
help="Learning rate to be used during training.")
parser.add_argument(
"--no_gpu",
action="store_true",
default=False,
help="Disables GPU usage even if a GPU is available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
Chilipp/psyplot | psyplot/warning.py | 1 | 3411 | # coding: utf-8
"""Warning module of the psyplot python module
This module controls the warning behaviour of the module via the python
builtin warnings module and introduces three new warning classes:
.. autosummary::
    PsyPlotRuntimeWarning
PsyPlotWarning
PsyPlotCritical"""
import warnings
import logging
# disable a warning about "comparison to 'None' in backend_pdf which occurs
# in the matplotlib.backends.backend_pdf.PdfPages class
warnings.filterwarnings(
'ignore', 'comparison', FutureWarning, 'matplotlib.backends.backend_pdf',
2264)
# disable a warning about "np.array_split" that occurs for certain numpy
# versions
warnings.filterwarnings(
'ignore', 'in the future np.array_split will retain', FutureWarning,
'numpy.lib.shape_base', 431)
# disable a warning about "elementwise comparison of a string" in the
# matplotlib.collection.Collection.get_edgecolor method that occurs for certain
# matplotlib and numpy versions
warnings.filterwarnings(
'ignore', 'elementwise comparison failed', FutureWarning,
'matplotlib.collections', 590)
logger = logging.getLogger(__name__)
class PsyPlotRuntimeWarning(RuntimeWarning):
"""Runtime warning that appears only ones"""
pass
class PsyPlotWarning(UserWarning):
"""Normal UserWarning for psyplot module"""
pass
class PsyPlotCritical(UserWarning):
"""Critical UserWarning for psyplot module"""
pass
warnings.simplefilter('always', PsyPlotWarning, append=True)
warnings.simplefilter('always', PsyPlotCritical, append=True)
def disable_warnings(critical=False):
"""Function that disables all warnings and all critical warnings (if
critical evaluates to True) related to the psyplot Module.
Please note that you can also configure the warnings via the
    psyplot.warning logger (logging.getLogger('psyplot.warning'))."""
    warnings.filterwarnings('ignore', r'\w', PsyPlotWarning, 'psyplot', 0)
    if critical:
        warnings.filterwarnings('ignore', r'\w', PsyPlotCritical, 'psyplot', 0)
def warn(message, category=PsyPlotWarning, logger=None):
"""wrapper around the warnings.warn function for non-critical warnings.
logger may be a logging.Logger instance"""
if logger is not None:
message = "[Warning by %s]\n%s" % (logger.name, message)
warnings.warn(message, category, stacklevel=3)
def critical(message, category=PsyPlotCritical, logger=None):
"""wrapper around the warnings.warn function for critical warnings.
logger may be a logging.Logger instance"""
if logger is not None:
message = "[Critical warning by %s]\n%s" % (logger.name, message)
warnings.warn(message, category, stacklevel=2)
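# Illustrative sketch (editorial addition, not part of the original module):
# how library code might emit psyplot warnings tied to a module logger.  The
# logger name and messages below are made up.
def _example_warnings():
    demo_logger = logging.getLogger('psyplot.demo')
    warn("data array has no units attribute", logger=demo_logger)
    critical("could not decode the coordinates", logger=demo_logger)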
old_showwarning = warnings.showwarning
def customwarn(message, category, filename, lineno, *args, **kwargs):
"""Use the psyplot.warning logger for categories being out of
PsyPlotWarning and PsyPlotCritical and the default warnings.showwarning
function for all the others."""
if category is PsyPlotWarning:
logger.warning(warnings.formatwarning(
"\n%s" % message, category, filename, lineno))
elif category is PsyPlotCritical:
logger.critical(warnings.formatwarning(
"\n%s" % message, category, filename, lineno),
exc_info=True)
else:
old_showwarning(message, category, filename, lineno, *args, **kwargs)
warnings.showwarning = customwarn
| gpl-2.0 |
JazzeYoung/VeryDeepAutoEncoder | pylearn2/scripts/tests/test_print_monitor_cv.py | 48 | 1927 | """
Test print_monitor_cv.py by training on a short TrainCV YAML file and
analyzing the output pickle.
"""
import os
import tempfile
from pylearn2.config import yaml_parse
from pylearn2.scripts import print_monitor_cv
from pylearn2.testing.skip import skip_if_no_sklearn
def test_print_monitor_cv():
"""Test print_monitor_cv.py."""
skip_if_no_sklearn()
handle, filename = tempfile.mkstemp()
trainer = yaml_parse.load(test_print_monitor_cv_yaml %
{'filename': filename})
trainer.main_loop()
# run print_monitor_cv.py main
print_monitor_cv.main(filename)
# run print_monitor_cv.py main with all=True
print_monitor_cv.main(filename, all=True)
# cleanup
os.remove(filename)
test_print_monitor_cv_yaml = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.mlp.MLP {
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 8,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 2,
irange: 0.05,
},
],
nvis: 10,
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 5,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
},
save_path: %(filename)s,
}
"""
| bsd-3-clause |
CoolProp/CoolProp | Web/scripts/fluid_properties.Mixtures.py | 2 | 2243 | from __future__ import print_function
from CPWeb.BibtexTools import getCitationOrAlternative, getBibtexParser
import CoolProp
import os.path
web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
csvfile = os.path.join(web_dir, 'fluid_properties', 'Mixtures.csv')
def merge_args(*args):
return " :raw-html:`<br/>` ".join(list(args))
def printCoeff(number):
if number is None or \
len(str(number).strip()) < 1:
return " "
number = float(number)
short = "{0:.4e}".format(number)
long = "{0:.14e}".format(number)
return u':raw-html:`<span title="{1}">{0}</span>`'.format(short, long)
class Dossier:
def __init__(self):
self.data = {}
def add(self, key, value):
if key not in self.data:
self.data[key] = []
self.data[key].append(value)
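# Illustrative sketch (editorial addition, not part of the original script):
# Dossier simply accumulates one list of values per key.
def _example_dossier():
    demo = Dossier()
    demo.add('CAS1', '7732-18-5')   # water
    demo.add('CAS1', '74-82-8')     # methane
    return demo.data                # {'CAS1': ['7732-18-5', '74-82-8']}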
d = Dossier()
pairs = CoolProp.get('mixture_binary_pairs_list')
print(len(pairs.split(',')))
for pair in pairs.split(','):
CAS1, CAS2 = pair.split('&')
d.add('CAS1', CAS1)
d.add('CAS2', CAS2)
for key in ['name1', 'name2', 'F', 'function', 'BibTeX', 'xi', 'zeta', 'betaT', 'betaV', 'gammaT', 'gammaV']:
try:
d.add(key, CoolProp.CoolProp.get_mixture_binary_pair_data(CAS1, CAS2, key))
except BaseException as BE:
d.add(key, '')
import pandas
df = pandas.DataFrame(d.data)
df = df.sort_values(by=['BibTeX', 'name1'], ascending=[0, 1])
bibtexer = getBibtexParser() # filename = '../../../CoolPropBibTeXLibrary.bib')
with open(csvfile, 'w') as fp:
header = 'Ref.,Name1,Name2,function,F,'
header += merge_args("xi", "zeta,")
header += merge_args("betaT", "betaV,")
header += merge_args("gammaT", "gammaV")
header += '\n'
fp.write(header)
for index, row in df.iterrows():
text = ','.join([ \
getCitationOrAlternative(bibtexer, row['BibTeX']),
row['name1'],
row['name2'],
row['function'],
row['F'],
merge_args(printCoeff(row['xi']), printCoeff(row['zeta'])),
merge_args(printCoeff(row['betaT']), printCoeff(row['betaV'])),
merge_args(printCoeff(row['gammaT']), printCoeff(row['gammaV']))
]) + '\n'
fp.write(text)
| mit |
Garrett-R/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
datacommonsorg/data | scripts/oecd/regional_demography/utils_test.py | 1 | 1248 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import pandas as pd
from pandas.testing import assert_frame_equal
from utils import multi_index_to_single_index
class TestUtils(unittest.TestCase):
def test_multi_index_to_single_index(self):
df = pd.read_csv("test.csv")
df_cleaned = df.pivot_table(values='value',
index=['name'],
columns=['var', 'sex'])
df_cleaned = multi_index_to_single_index(df_cleaned)
df_expected = pd.read_csv("test_expected.csv")
self.assertTrue(assert_frame_equal(df_cleaned, df_expected) is None)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
jbloom/mutpath | src/trajectory.py | 1 | 39654 | """Module for representing mutational trajectories as directed graphs.
Represents mutational trajectories through sequence space, which is the space in
which each node is a unique sequence and edges are directional connections
between nodes corresponding to mutations.
These digraphs can be used to visualize mutational trajectories through sequence
space. They are designed to be visualized using the GraphViz program.
Written by Jesse Bloom.
Functions defined in this module
------------------------------------
`WriteGraphVizTrajectory` - Writes a GraphViz visualization of a *Trajectory* object.
`WriteMutationDates` - Writes files giving mutation dates and credible intervals.
`WriteNodePersistence` - Writes times that nodes persist (time before next mutation).
`DistanceAlongPath` - Distance along a mutational path.
`HeuristicTraceback` - Tries to find the last high weight predecessor of a node.
`IteratePaths` - Iterates of paths in a mutational path file.
Classes defined in this module
--------------------------------
`Trajectory` - A class for representing a mutational trajectory through sequence space.
Detailed documentation for functions
--------------------------------------
Provided in the individual function documentation strings below.
"""
import os
import re
import sys
import math
import sequtils
import stats
import plot
def DistanceAlongPath(startseq, endseq, s):
"""Returns the distance along the mutational path.
This distance of a sequence *s* along the path is defined
as the Hamming Distance between *startseq* and *s* minus
the Hamming Distance between *endseq* and *s* plus the
Hamming distance between *startseq* and *endseq*.
Sequences are not case sensitive.
"""
assert len(s) == len(startseq) == len(endseq)
s_to_start = len([1 for (x, y) in zip(s.upper(), startseq.upper()) if x != y])
s_to_end = len([1 for (x, y) in zip(s.upper(), endseq.upper()) if x != y])
start_to_end = len([1 for (x, y) in zip(startseq.upper(), endseq.upper()) if x != y])
return s_to_start - s_to_end + start_to_end
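# Illustrative sketch (editorial addition, not part of the original module):
# a tiny worked example with made-up 4-nucleotide sequences.
def _example_distance_along_path():
    startseq, endseq = 'ATGC', 'ATCC'
    d_start = DistanceAlongPath(startseq, endseq, 'ATGC')  # 0 - 1 + 1 = 0
    d_end = DistanceAlongPath(startseq, endseq, 'ATCC')    # 1 - 0 + 1 = 2
    return d_start, d_end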
def HeuristicTraceback(t, node, cutoff):
"""Traces back to find last high weight precessor of a node.
*t* is a *Trajectory* object.
*node* is the string name of a node in *t*.
*cutoff* is a number > 0 and <= 1.
This function starts at node, and traces back along the trajectory
to return the first predecessor of *node* with a weight >= *cutoff*.
It does this by tracing back from *node* along its highest weight
incoming edge to that predecessor, and then from that predecessor
along its highest weight edge, etc until we find a predecessor
with weight >= *cutoff*. This approach
is not absolutely guaranteed to find the first predecessor with weight
> *cutoff*, but it should work for reasonable trajectories. But beware,
hence the word 'Heuristic' in the name of this function.
The return value is the string name of the first high weight predecessor.
This function recursively calls itself.
"""
assert node in t.nodes
weights_predecessors = []
for ((n1, n2), weight) in t.edges.iteritems():
if n2 == node:
weights_predecessors.append((weight, n1))
if not weights_predecessors:
raise ValueError("failed to find predecessor")
weights_predecessors.sort()
weights_predecessors.reverse()
(weight, predecessor) = weights_predecessors[0]
if t.nodes[predecessor] >= cutoff:
return predecessor
else:
return HeuristicTraceback(t, predecessor, cutoff)
def WriteNodePersistence(t, nodestowrite, interval, persistencefile, cutoff):
"""Writes times for which nodes persist before the next mutation.
The trajectory *t* specifies a set of nodes. For each node specified
by *nodestowrite* and with weight >= *cutoff*, reports the time until that
node experiences the next mutation that moves it to a new node.
If *t* is a trajectory through protein sequence space that was
created from nucleotide sequences (this will be the case if *t*
was created with *translateseqs = True*), the next mutation that
moves it to a new node is a nonsynonymous mutation. This method
then also records the time until the first mutation of any type
(synonymous or nonsynonymous) after the trajectory moves to nodes.
The persistence is written as the posterior median from all
paths containing plus the Bayesian credible interval specified
by *interval*. The persistence
times are written to the text file *persistencefile*.
CALLING VARIABLES:
* *t* is a *Trajectory* object that contains the persistence data.
* *nodestowrite* is a dictionary specifying for which nodes we
write persistence times, and the names used when writing these
nodes. It is keyed by node sequences (which
are the identifiers for the nodes in t.nodes) and the values are
strings giving names that are used to label the nodes in the
output. However, there does not actually have to be a
node with persistence data for each key in *nodestowrite* -- if there
is not persistence data for a node key, nothing is written for it.
* *interval* specifies the range of the Bayesian credible interval,
for example a value of 0.9 means that we print the 90% credible
interval.
* *persistencefile* is a string giving the name of the text file
that we create which contains the persistence times. It has
headers explaning its format. If this file already exists, it
is overwritten. The order in which nodes is written is arbitrary.
* *cutoff* is a weight cutoff (fraction of paths containing this node).
We only write persistence times for nodes that are both in
*nodestowrite* and have weights >= *cutoff*. This keeps us from
writing persistence times for nodes that only occur rarely.
"""
d = dict([(name, node) for (node, name) in nodestowrite.iteritems()])
f = open(persistencefile, 'w')
f.write("# Name : name of the node\n")
f.write("# MedianPersistence : posterior median time to next node\n")
f.write("# MinPersistenceInterval : minimum of %.2f percent credible interval\n" % (interval * 100))
f.write("# MaxPersistenceInterval : maximum of %.2f percent credible interval\n" % (interval * 100))
if t.persistence != t.timetofirstmut:
f.write("# MedianTimeToFirstMut : posterior median time to first mutation\n")
f.write("# MinTimeToFirstMutInterval : minimum of %.2f percent credible interval\n" % (interval * 100))
f.write("# MaxTimeToFirstMutInterval : maximum of %.2f percent credible interval\n" % (interval * 100))
f.write("#\n")
f.write("#Name\tMedianPersistence\tMinPersistenceInterval\tMaxPersistenceInterval\tMedianTimeToFirstMut\tMinTimeToFirstMut\tMaxTimeToFirstMut\n")
else:
f.write("#\n")
f.write("#Name\tMedianPersistence\tMinPersistenceInterval\tMaxPersistenceInterval\n")
for (node, persistence) in t.persistence.iteritems():
if (node not in nodestowrite):
continue
if (len(persistence) / float(t.npaths)) < cutoff:
continue
name = nodestowrite[node]
(median, mininterval, maxinterval) = stats.MedianCredibleInterval(persistence, interval)
f.write("%s\t%f\t%f\t%f" % (name, median, mininterval, maxinterval))
if t.persistence != t.timetofirstmut:
(median, mininterval, maxinterval) = stats.MedianCredibleInterval(t.timetofirstmut[node], interval)
f.write("\t%f\t%f\t%f\n" % (median, mininterval, maxinterval))
else:
f.write("\n")
f.close()
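# Illustrative sketch (editorial addition, not part of the original module):
# a hypothetical call, assuming `t` is a Trajectory built elsewhere; the file
# name, interval, and cutoff below are arbitrary choices.
def _example_write_node_persistence(t):
    nodestowrite = {t.startseq: 'starting sequence', t.endseq: 'ending sequence'}
    WriteNodePersistence(t, nodestowrite, interval=0.9,
                         persistencefile='persistence.txt', cutoff=0.5)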
def WriteMutationDates(t, labelcutoff, interval, datesfile, datesplot, lasttipdate):
"""Creates text file and plot showing dates of mutations.
For each mutation that occurs in at least *labelcutoff* fraction of
the paths that form the trajectory *t*, this function writes the
posterior median and a Bayesian credible interval for the date of
first occurrence of that mutation. The posterior is taken over all
paths that contain that mutation.
The output also provides information about whether the mutations
are on the branch from the starting sequence to the common ancestor,
or from the common ancestor to the starting sequence.
CALLING VARIABLES:
* *t* is a *Trajectory* object that contains the mutations data.
* *labelcutoff* is a number > 0 and <= 1. We write the dates of all
mutations in *t* that have weights >= *labelcutoff* (occur in
at least this fraction of the paths).
* *interval* specifies the range of the Bayesian credible interval,
for example a value of 0.9 means that we print the 90% credible
interval.
* *datesfile* is the name of the text file that we create which
contains the dates and intervals. It is overwritten if it does
not already exist.
* *datesplot* is the name of the plot file that we create using
matplotlib. This plot can only be created if matplotlib is
available. So first check on this (for example using
*Plot.PylabAvailable()*. If matplotlib is not available, or if
you don't want to make the plot, make this argument *None*. Otherwise
make it the name of the PDF plot file that you want to create.
* *lasttipdate* specifies the absolute units for the dates. Dates
in *t* will be specified in units of time before the most recent
tip. Here provide a number giving the date of the most recent tip,
and the dates shown are then this number minus the time for
each mutation.
"""
    mutdatestoca = [] # entries are tuples (median, mininterval, maxinterval, mut, fractoca, weight)
    mutdatesfromca = [] # entries are tuples (median, mininterval, maxinterval, mut, fractoca, weight)
n = t.npaths
for (mut, muttimes) in t.mutations.iteritems():
nmut = len(muttimes)
weight = nmut / float(n)
if weight >= labelcutoff:
# mutation meets the cutoff
fractoca = t.mutationstoca[mut] / float(nmut)
(median, mininterval, maxinterval) = stats.MedianCredibleInterval(muttimes, interval)
# we interchange minimum and median on the next line because the times
# are in units before last tip prior to this conversion
(median, maxinterval, mininterval) = (lasttipdate - median, lasttipdate - mininterval, lasttipdate - maxinterval)
if fractoca > 0.5:
mutdatestoca.append((median, mininterval, maxinterval, mut, fractoca, weight))
else:
mutdatesfromca.append((median, mininterval, maxinterval, mut, fractoca, weight))
mutdatestoca.sort()
mutdatestoca.reverse()
mutdatesfromca.sort()
mutdates = mutdatestoca + mutdatesfromca
f = open(datesfile, 'w')
f.write('# Mutation : mutation in 1, 2, ... numbering\n')
f.write('# FracOccurrence : fraction of paths containing this mutation\n')
f.write('# FracToCommonAncestor : fraction of times which this mutation is on path from starting sequence to common ancestor\n')
f.write('# MedianDate : posterior median date of mutation\n')
    f.write('# MinInterval : minimum of %.2f percent Bayesian credible interval (median centered)\n' % (interval * 100))
    f.write('# MaxInterval : maximum of %.2f percent Bayesian credible interval (median centered)\n' % (interval * 100))
f.write('#\n')
f.write('# Mutation\tFracOccurrence\tFracToCommonAncestor\tMedianDate\tMinInterval\tMaxInterval\n')
for (median, mininterval, maxinterval, mut, fractoca, weight) in mutdates:
f.write('%s\t%f\t%f\t%f\t%f\t%f\n' % (mut, weight, fractoca, median, mininterval, maxinterval))
f.close()
if datesplot:
plot.DatesPlot(mutdates, datesplot, interval)
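# Illustrative sketch (editorial addition, not part of the original module):
# a hypothetical call assuming `t` is a Trajectory whose most recent tip was
# sampled in 2007.1; the file names and cutoffs are arbitrary, and the plot is
# only requested when the caller says matplotlib is available.
def _example_write_mutation_dates(t, pdfavailable=False):
    WriteMutationDates(t, labelcutoff=0.6, interval=0.9,
                       datesfile='mutation_dates.txt',
                       datesplot='mutation_dates.pdf' if pdfavailable else None,
                       lasttipdate=2007.1)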
def WriteGraphVizTrajectory(t, graphvizfile, minweight, labelcutoff,\
nodenames=None, nodesize=0.9, ranksep=0.1, nodesep=0.2, penwidth=10,\
fontsize=40, arrowsize=1.4, fontname='Helvetica-Bold', rankdir='TB',\
startendasdiamonds=True):
"""Writes a GraphViz visualization of a *Trajectory* object.
This function creates a file *graphvizfile* that can be used to visualize the
directed graph represented by a *Trajectory* object *t*. Graphviz is a freely
available software package (http://www.graphviz.org/) for visualizing graphs.
The trajectory is written in the DOT language
(http://www.graphviz.org/doc/info/lang.html).
The areas of nodes and the widths of edges are proportional to their weights.
The color saturations of nodes and edges are linearly proportional to their
weights.
    The rank of nodes (for example, their horizontal position when *rankdir*
    is 'LR', or vertical position when it is 'TB') is ordered according to
    their distance along the path from the
starting to ending sequence. This distance is defined as the Hamming
Distance from the starting node minus the Hamming Distance from the ending node
plus the Hamming distance between the starting and ending nodes.
CALLING VARIABLES:
* *t* is the *Trajectory* object that contains the trajectory that we want
to visualize.
* *graphvizfile* is a string giving the name of the GraphViz input file that
we want to create. It will typically end with the extension ``.dot``. If this
file already exists, it is overwritten. You should be able to open this file
directly with Graphviz. The file is written in the DOT language
(http://www.graphviz.org/doc/info/lang.html).
* *minweight* is a number specifying the minimum weight that a node or edge
must possess in order to be shown on the graph. Nodes or edges with
weights < *minweight* are not included. Note that this creates a possibility
for orphan nodes if a node has a weight >= *minweight* but all of its
incoming and outgoing nodes have weights < *minweight*. To show all nodes
and edges regardless of weight, set *minweight* to zero. However, this can
sometimes lead to a very large *graphvizfile* since there can be a huge
number of very low weight nodes / edges.
* *labelcutoff* is the minimum weight that an edge must possess in order
to be labeled on the graph. In addition, all nodes with weight >=
*labelcutoff* have an incoming edge that is labeled. If there is not
such an edge, then traces back to find the first predecessor node with
weight *labelcutoff* and then draws a different colored edge spanning
multiple mutations to connect these nodes. Generally, you would want
*labelcutoff > 0.5*.
* *nodenames* is an optional argument that allows you to specify names
for nodes. It is *None* by default. If you set it to another value,
it should be a dictionary. It is keyed by node sequences (which
are the identifiers for the nodes in t.nodes) and the values are
strings giving names that are used to label the nodes in the
trajectory. These names are written on the nodes only if
the weight for that node is >= *labelcutoff*.
OPTIONAL CALLING VARIABLES SPECIFYING FORMATTING DETAILS:
    * *nodesize* is the height of a node with weight 1.0; node heights scale
      with the square root of the weight.
* *ranksep* is the separation between ranks, as fraction of *nodesize*.
* *nodesep* is the minimum separation between nodes of the same rank,
as fraction of *nodesize*.
* *penwidth* is the pen width of an edge.
* *fontsize* is the font size.
* *arrowsize* is the size of the arrows.
* *fontname* is the font style.
* *rankdir* specifies the direction the ranks move. If set to 'TB' then
the graph moves from top to bottom. If set to 'LR' then the graph moves
from left to right.
* *startendasdiamonds* is a Boolean switch. If True, we show the starting
and ending nodes as diamonds rather than circles. We also make these
starting and ending nodes larger in size to fit their full labels. If
False, we make them circles with size proportional to weights like
all other nodes.
"""
f = open(graphvizfile, 'w')
f.write('digraph G { rankdir=%s; ranksep=%f; nodesep=%f;\n' % (rankdir, ranksep * nodesize, nodesep * nodesize))
# first write the nodes ordered into subgraphs of the same rank by DistanceAlongPath
nodes_by_d = {}
needs_incoming = {} # does node need an incoming edge?
for (node, weight) in t.nodes.iteritems():
if weight < minweight:
continue # weight too low
d = DistanceAlongPath(t.startseq, t.endseq, node)
if d in nodes_by_d:
nodes_by_d[d].append((node, weight))
else:
nodes_by_d[d] = [(node, weight)]
if (weight >= labelcutoff) and node != t.startseq:
needs_incoming[node] = True
for d in range(max(nodes_by_d.keys()) + 1):
if d not in nodes_by_d:
continue # none of this distance
f.write('\tsubgraph %d { label="DistanceAlongPath%d"; rank=same;\n' % (d, d))
for (node, weight) in nodes_by_d[d]:
if startendasdiamonds and (node == t.startseq or node == t.endseq):
shape = 'diamond'
fixedsize = 'false'
else:
shape = 'circle'
fixedsize = 'true'
if nodenames and (node in nodenames) and weight >= labelcutoff:
nodelabel = "%s" % nodenames[node]
else:
nodelabel = ''
f.write('\t\tnode [style=filled shape=%s label="%s" height=%f color="0.7 %f 0.9" penwidth=%f arrowsize=%f fontsize=%d fontname="%s" fontcolor="white" fixedsize=%s] "%s";\n' % (shape, nodelabel, nodesize * math.sqrt(weight), weight, penwidth, arrowsize, fontsize, fontname, fixedsize, node))
f.write('\t}\n')
# now write all of the edges
# In order to get good overlay, first we write unabeled edges, then
# labeled edges, and finally implied connections between major nodes without
# connecting labeled edges.
labeled_edges = []
for ((node1, node2), weight) in t.edges.iteritems():
if weight < minweight:
continue # weight too low
if weight >= labelcutoff:
assert len(node1) == len(node2)
diffs = [i for i in range(len(node1)) if node1[i] != node2[i]]
if len(diffs) != 1:
raise ValueError("Should be exactly one difference")
i = diffs[0]
edgelabel = '%s%d%s' % (node1[i], i + 1, node2[i])
if node2 in needs_incoming:
del needs_incoming[node2]
else:
edgelabel = ''
edgestring = '\t"%s" -> "%s" [weight=%f penwidth=%f color="0.7 %f 0.9" arrowsize=%f label="%s" fontsize=%d fontname="%s"];\n' % (node1, node2, weight, penwidth * weight, weight, arrowsize, edgelabel, fontsize, fontname)
if edgelabel:
labeled_edges.append(edgestring) # write these later
else:
f.write(edgestring)
f.write(''.join(labeled_edges)) # now write labeled edges
# now find implied connections between major nodes without incoming labeled edges
for node in needs_incoming:
predecessor = HeuristicTraceback(t, node, labelcutoff)
diffs = [i for i in range(len(node)) if node[i] != predecessor[i]]
assert len(diffs) >= 1
diffs.sort()
edgelabel = '-'.join(["%s%d%s" % (predecessor[i], i + 1, node[i]) for i in diffs])
f.write('\t"%s" -> "%s" [weight=0 penwidth=%f color="0.0 1.0 0.9" arrowsize=%f label="%s" fontsize=%d fontname="%s" fontcolor="0.0 1.0 0.9"];\n' % (predecessor, node, penwidth, arrowsize, edgelabel, fontsize, fontname))
f.write('}')
f.close()
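# Illustrative usage sketch (not part of the original module). The call below
# assumes a Trajectory object *t* built elsewhere and uses keyword arguments
# throughout, since the exact positional order of the parameters is not shown
# here; the file name and cutoff values are hypothetical.
#
#   t = Trajectory('my_paths.txt')
#   WriteGraphvizTrajectory(t=t, graphvizfile='trajectory.dot',
#                           minweight=0.01, labelcutoff=0.6,
#                           nodenames=None, rankdir='TB',
#                           startendasdiamonds=True)
#
# The resulting DOT file can then be rendered with Graphviz, for example:
#   dot -Tpdf trajectory.dot -o trajectory.pdf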
def IteratePaths(pathfile):
"""Iterates over paths in a mutational path file.
*pathfile* should be a string giving a name of an input file specifying one or
more mutational paths. These files are of the format created by
``mutpath_get_paths.py``. The required format is detailed below.
This function will iterate over all paths in *pathfile*. For each path,
it will return the tuple
*(startseq, starttime, endseq, endtime, caseq, catime, tocamuts, fromcamuts)*.
The entries of these tuples are as follows. All sequences are converted
to upper case, as are all letters in the mutation notations. The times
are measured in units before the most recent tip of the tree. Tuple entries:
* *startseq* is the starting sequence specified by *startstrain_seq*
* *starttime* is the time of *startseq* specified by *startstrain_time*
* *endseq* is the ending sequence specified by *endstrain_seq*
* *endtime* is the time of *endseq* specified by *endstrain_time*
* *caseq* is the common ancestor sequence specified by *commonancestor_seq*
* *catime* is the time of *caseq* specified by *commonancestor_time*
* *tocamuts* is a list of the mutations going from *startseq* to *caseq*,
specified in the order they are listed in the file (should be along
the path) as the 2-tuples of the form *('A6G', 42.713)* where the
entries are the mutation and then the time.
* *fromcamuts* is like *tocamuts*, but for mutations going from
*caseq* to *endseq*.
The format of *pathfile* is as follows. This file should list
mutational paths as::
MUTPATH 1
startstrain_name A/Aichi/2/1968_1968.50
startstrain_seq ATGGCAATGGGCTAA
startstrain_time 42.5
endstrain_name A/Brisbane/10/2007_2007.10
endstrain_seq ATGACGATTGGATAA
endstrain_time 3.9
commonancestor_seq ATGGCGATGGGCTAA
commonancestor_time 43.12713
startstrain_to_commonancestor_path
A6G : 42.713
commonancestor_to_endstrain_path
G9T : 31.732
G4A : 25.1343
C12A : 10.134
MUTPATH 2
startstrain_name A/Aichi/2/1968_1968.50
startstrain_seq ATGGCAATGGGCTAA
startstrain_time 42.5
endstrain_name A/Brisbane/10/2007_2007.10
endstrain_seq ATGACGATTGGATAA
endstrain_time 3.9
commonancestor_seq ATGGCGATGGGCTAA
commonancestor_time 44.12713
startstrain_to_commonancestor_path
A6G : 42.113
G9T : 43.124
commonancestor_to_endstrain_path
G4A : 21.1343
C5A : 19.531
A5C : 19.402
C12A : 9.134
The file lists each of the paths numbered starting at 1.
Within each path, the mutations are indicated with numbering starting
at 1 for the first position in the sequence. The times for the mutations,
the starting and ending strains, and the most recent common ancestor of these
two strains, are also indicated. These times are measured in units before
the most recent tip node (so the root node would have the largest value of time).
The mutations must move from the starting to the ending sequence, and if
multiple paths are specified, then they all must have the same starting and
ending sequences.
"""
    mutmatch = re.compile('^(?P<mut>[A-Za-z\*\-]\d+[A-Za-z\*\-]) : (?P<time>\d+\.*\d*)$')
if not os.path.isfile(pathfile):
raise IOError("Cannot find pathfile %s" % pathfile)
f = open(pathfile)
firststartseq = firstendseq = None
while True:
try:
line = f.next()
except StopIteration:
break # no more lines
lines = []
while not line.isspace():
lines.append(line.strip())
line = f.next()
tocamuts = []
fromcamuts = []
assert lines[0][ : 7] == 'MUTPATH'
assert lines[1][ : 16] == 'startstrain_name'
assert lines[2][ : 15] == 'startstrain_seq'
startseq = lines[2].split()[1].strip().upper()
if firststartseq == None:
firststartseq = startseq
elif firststartseq != startseq:
raise IOError("Change in startseq")
assert lines[3][ : 16] == 'startstrain_time'
starttime = float(lines[3].split()[1])
assert lines[4][ : 14] == 'endstrain_name'
assert lines[5][ : 13] == 'endstrain_seq'
endseq = lines[5].split()[1].strip().upper()
if firstendseq == None:
firstendseq = endseq
elif firstendseq != endseq:
raise IOError("Change in endseq")
assert lines[6][ : 14] == 'endstrain_time'
endtime = float(lines[6].split()[1])
assert lines[7][ : 18] == 'commonancestor_seq'
caseq = lines[7].split()[1].strip().upper()
assert lines[8][ : 19] == 'commonancestor_time'
catime = float(lines[8].split()[1])
assert lines[9] == 'startstrain_to_commonancestor_path'
i = 10
        while i < len(lines) and lines[i] != 'commonancestor_to_endstrain_path':
m = mutmatch.search(lines[i])
if not m:
raise ValueError("Failed to match mutation line:\n%s" % lines[i])
tocamuts.append((m.group('mut'), float(m.group('time'))))
i += 1
if i < len(lines):
if lines[i] != 'commonancestor_to_endstrain_path':
raise ValueError("Expected 'commonancestor_to_endstrain_path', but got:\n%s" % lines[i])
i += 1
while i < len(lines):
m = mutmatch.search(lines[i])
if not m:
raise ValueError("Failed to match mutation line:\n%s" % lines[i])
fromcamuts.append((m.group('mut'), float(m.group('time'))))
i += 1
yield (startseq, starttime, endseq, endtime, caseq, catime, tocamuts, fromcamuts)
f.close()
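# Illustrative usage sketch (not part of the original module); the file name
# below is hypothetical. Each iteration yields one mutational path, from which
# we count the mutations on each side of the common ancestor.
#
#   for (startseq, starttime, endseq, endtime, caseq, catime, tocamuts,
#        fromcamuts) in IteratePaths('my_paths.txt'):
#       print "%d mutations to common ancestor, %d from common ancestor" % (
#               len(tocamuts), len(fromcamuts))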
class Trajectory(object):
"""Class for representing a mutational trajectory through sequence space.
This class represents a mutational trajectory in sequence space. The trajectory
is a directed graph consisting of nodes (sequences) and edges (mutations
connecting nodes). The trajectory moves from one known sequence to another
known sequence, passing through some number of uncertain intermediates (nodes).
The trajectory is created by passing it a set of possible mutational paths
from the starting to ending sequence. In the trajectory, the weight of each node
corresponds to the fraction of paths that contain that sequence, while the
weight of each edge corresponds to the fraction of paths that contain that edge.
Note that if a path contains a node or edge more than once (which can happen
if there are mutational cycles), the node or edge is still considered to have
occurred once in that path for the purposes of assigning the weights.
Each *Trajectory* object *t* has the following attributes:
* *t.npaths* : the number of individual paths used to construct the
overall trajectory.
* *t.startseq* : a string giving the starting sequence for the trajectory.
* *t.endseq* : a string giving the ending sequence for the trajectory.
* *t.nodes* : a dictionary keyed by strings representing the sequences for
each node found at least once in the trajectory, and with values equal to
the weight of that node (fraction of paths containing the node).
* *t.edges* : a dictionary keyed by 2-tuples of strings *(s1, s2)* and
values giving the weight of the directed edges from sequence *s1* to
*s2*.
* *t.mutations* : a dictionary keyed by mutation strings of the
form 'G5A' specifying mutations where the numbering is in
1, 2, ... For each mutation that occurs in at least one of
the paths passed to this trajectory, there will be a key.
The values are lists giving the times of occurrence for all
occurrences of that mutation in the paths used to create this
trajectory. If a mutation occurs more than once in a path,
      only the time for its first occurrence is listed. So the
      fraction of paths that contain some mutation *m* is
      *len(t.mutations[m]) / float(t.npaths)*. Note that if *translateseqs*
is *True*, then the mutations specified here are only the
protein mutations, not the nucleotide ones in the underlying
nucleotide sequences.
* *t.mutationstoca* : a dictionary keyed by mutation strings just
as for *t.mutations*. Each mutation that is added to the lists
in *t.mutations* can arise on the branch from the starting sequence
to the common ancestor, or on the branch from the common ancestor
to the ending sequence. The value of *t.mutationstoca[mut]* is
the number of times that the first occurrence of *mut* is on
the route from starting sequence to the common ancestor. So if
*mut* is always on the path from the starting sequence to the
common ancestor, then *t.mutationstoca[mut] == len(t.mutations[mut])*.
      If it is always on the path from the common ancestor to the
      ending sequence, then *t.mutationstoca[mut] == 0*.
* *t.persistence* : a dictionary keyed by the node sequences and
with the values being a list of numbers. If a node occurs on a path,
then the time for which the node sequence persisted before another
mutation is listed (for the first occurrence of the node if it
      occurs multiple times on the same path). Note that if *translateseqs*
is True, then these are the persistence times to the first
non-synonymous mutation, as only those mutations change the node
sequences. The total length of the list for each node will be equal
to the number of paths that contained that node.
* *t.timetofirstmut* : if *translateseqs* is False, then this
is just equal to *t.persistence*. But if *translateseqs* is True,
then the entries give the time after the occurrence of a node to
      the first mutation of any type -- synonymous or nonsynonymous. In
this case, entries in *t.timetofirstmut* will often be less than
those in *t.persistence*, since the first mutation to a node will
      often be synonymous, which will change the nucleotide sequence
but not the actual protein sequence node identity.
To create a *Trajectory* object *t*, use the command::
t = Trajectory(pathfile, translateseqs=False, printprogress=False)
*pathfile* should be a string giving a name of an input file specifying one or
more mutational paths. These files are of the format created by
``mutpath_get_paths.py``. They must be readable by the *IteratePaths*
function.
*translateseqs* is an optional argument that is *False* by default. If it
is set to *True*, then the sequences contained within *mutpathfile* are
taken to represent coding nucleotide sequences, but the trajectory is
built through protein sequence space. In other words, the nucleotide sequences
in the paths are translated, and the trajectory is built from these
translated sequences. All of the nodes and edges will therefore connect
protein sequences. Note that no checking is made to ensure that the
sequences translate properly: any stop codons are simply translated to '*',
codons containing gaps are translated to '-', and sequences that do not have
lengths that are multiples of three have the extra one or two nucleotides
truncated.
*printprogress* is a switch specifying that we print progress as we
process paths. You may want to use this if you are processing a large
number of paths and want to output the progress. By default it is *False*,
meaning that nothing is printed. If you set it to an integer, it will then
print to *sys.stdout* after processing every *printprogress* paths.
"""
def __init__(self, pathfile, translateseqs=False, printprogress=False):
"""Intializes and returns a *Trajectory* object.
Returns a *Trajectory* object *t* constructed from the collection of mutational
paths encoded in *pathfile*. Detailed in main docstring for this class.
"""
if not os.path.isfile(pathfile):
raise IOError("Cannot find pathfile %s" % pathfile)
self.npaths = 0
self.nodes = {}
self.edges = {}
self.mutations = {}
self.mutationstoca = {}
self.persistence = {}
if translateseqs:
self.timetofirstmut = {}
else:
self.timetofirstmut = self.persistence
self.startseq = self.endseq = None
for (startseq, starttime, endseq, endtime, caseq, catime, tocamuts, fromcamuts) in IteratePaths(pathfile):
onthispath = {}
persistenceonthispath = {}
timetofirstmutonthispath = {}
self.npaths += 1
if printprogress:
if not (self.npaths % printprogress):
sys.stdout.write("Processed %d paths...\n" % self.npaths)
sys.stdout.flush()
currentseq = list(startseq)
nodetime = starttime
if translateseqs:
startseq = sequtils.Translate([('head', startseq)], readthrough_n=True, readthrough_stop=True, truncate_incomplete=True, translate_gaps=True)[0][1]
endseq = sequtils.Translate([('head', endseq)], readthrough_n=True, readthrough_stop=True, truncate_incomplete=True, translate_gaps=True)[0][1]
if self.startseq == None:
self.startseq = startseq
self.endseq = endseq
assert self.startseq == startseq and self.endseq == endseq
onthispath[startseq] = True
if startseq in self.nodes:
self.nodes[startseq] += 1
else:
self.nodes[startseq] = 1
firstfromca = True
for (mutlist, toca) in [(tocamuts, True), (fromcamuts, False)]:
for (mut, time) in mutlist:
(wt, i, m) = (mut[0], int(mut[1 : -1]), mut[-1])
if not (1 <= i <= len(currentseq)):
raise ValueError("Position %d is out of range." % i)
if currentseq[i - 1] != wt:
raise ValueError("Identity mismatch for %s" % mut)
if wt == m:
raise ValueError("Invalid mutation %s" % mut)
s1 = ''.join(currentseq)
currentseq[i - 1] = m
s2 = ''.join(currentseq)
if translateseqs:
s1 = sequtils.Translate([('head', s1)], readthrough_n=True, readthrough_stop=True, truncate_incomplete=True, translate_gaps=True)[0][1]
s2 = sequtils.Translate([('head', s2)], readthrough_n=True, readthrough_stop=True, truncate_incomplete=True, translate_gaps=True)[0][1]
if not s2 in onthispath:
onthispath[s2] = True
if s2 in self.nodes:
self.nodes[s2] += 1
else:
self.nodes[s2] = 1
assert len(s1) == len(s2) == len(self.startseq) == len(self.endseq)
if self.persistence != self.timetofirstmut:
assert translateseqs
if s1 not in timetofirstmutonthispath:
timetofirstmutonthispath[s1] = True
if toca:
dt = time - nodetime
elif firstfromca:
dt = catime - nodetime + catime - time
else:
dt = nodetime - time
if s1 in self.timetofirstmut:
self.timetofirstmut[s1].append(dt)
else:
self.timetofirstmut[s1] = [dt]
if s1 != s2:
if s1 not in persistenceonthispath:
persistenceonthispath[s1] = True
if toca:
dt = time - nodetime
elif firstfromca:
firstfromca = False
dt = catime - nodetime + catime - time
else:
dt = nodetime - time
if s1 in self.persistence:
self.persistence[s1].append(dt)
else:
self.persistence[s1] = [dt]
nodetime = time
if translateseqs:
diffs = [i for i in range(len(s1)) if s1[i] != s2[i]]
assert len(diffs) == 1, str(diffs)
i = diffs[0]
mutstring = "%s%d%s" % (s1[i], i + 1, s2[i])
else:
mutstring = mut
if mutstring not in onthispath:
if mutstring in self.mutations:
self.mutations[mutstring].append(time)
if toca:
self.mutationstoca[mutstring] += 1
else:
self.mutations[mutstring] = [time]
if toca:
self.mutationstoca[mutstring] = 1
else:
self.mutationstoca[mutstring] = 0
onthispath[mutstring] = True
tup = (s1, s2)
if not tup in onthispath:
onthispath[tup] = True
if tup in self.edges:
self.edges[tup] += 1
else:
self.edges[tup] = 1
# check that path finished correctly
if translateseqs:
if sequtils.Translate([('head', ''.join(currentseq))], readthrough_n=True, readthrough_stop=True, truncate_incomplete=True, translate_gaps=True)[0][1] != endseq:
raise ValueError("Failed to end on endseq")
elif ''.join(currentseq) != endseq:
raise ValueError("Failed to end on endseq")
if not self.npaths:
raise ValueError("Failed to find any paths in %s" % pathfile)
for key in self.nodes.iterkeys():
if key != self.endseq:
if len(self.persistence[key]) != self.nodes[key]:
raise ValueError("Incorect number of persistence entries")
self.nodes[key] /= float(self.npaths)
for key in self.edges.iterkeys():
self.edges[key] /= float(self.npaths)
if self.nodes[self.startseq] != 1:
raise ValueError("The weight of startseq is not one")
if self.nodes[self.endseq] != 1:
raise ValueError("The weight of endseq is not one")
# Test with doctest
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 |
nrhine1/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
Aloomaio/googleads-python-lib | examples/ad_manager/v201805/report_service/run_report_and_create_match_table.py | 1 | 3150 | #!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a report for LineItems with additional data from a PQL table.
Fetches a basic report over a network's LineItems and then adds some
extra columns which might be useful for future analysis, such as
LineItemType, from the PQL Line_Item table, creating a match table.
"""
from datetime import date
from datetime import timedelta
import tempfile
# Import appropriate modules from the client library.
from googleads import ad_manager
from googleads import errors
try:
import pandas
except ImportError:
raise ImportError('This example requires the pandas library to be installed.')
def main(client):
# Set the start and end dates of the report to run (past 8 days).
end_date = date.today()
start_date = end_date - timedelta(days=8)
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['LINE_ITEM_ID', 'LINE_ITEM_NAME'],
'columns': ['AD_SERVER_IMPRESSIONS', 'AD_SERVER_CLICKS',
'AD_SERVER_CTR', 'AD_SERVER_CPM_AND_CPC_REVENUE',
'AD_SERVER_WITHOUT_CPD_AVERAGE_ECPM'],
'dateRangeType': 'CUSTOM_DATE',
'startDate': start_date,
'endDate': end_date
}
}
# Initialize a DataDownloader.
report_downloader = client.GetDataDownloader(version='v201805')
try:
# Run the report and wait for it to finish.
report_job_id = report_downloader.WaitForReport(report_job)
except errors.AdManagerReportError, e:
print 'Failed to generate report. Error was: %s' % e
with tempfile.NamedTemporaryFile(
suffix='.csv.gz', mode='wb', delete=False) as report_file:
# Download report data.
report_downloader.DownloadReportToFile(
report_job_id, 'CSV_DUMP', report_file)
# Create a PQL query to fetch the line item data
line_items_pql_query = ('SELECT Id, LineItemType, Status FROM LineItem')
# Download the response from PQL select statement
line_items = report_downloader.DownloadPqlResultToList(line_items_pql_query)
# Use pandas to join the two csv files into a match table
report = pandas.read_csv(report_file.name)
line_items = pandas.DataFrame(data=line_items[1:], columns=line_items[0])
merged_result = pandas.merge(report, line_items,
left_on='Dimension.LINE_ITEM_ID', right_on='id')
merged_result.to_csv('~/complete_line_items_report.csv', index=False)
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| apache-2.0 |
HarllanAndrye/nilmtk | nilmtk/elecmeter.py | 5 | 30305 | from __future__ import print_function, division
from warnings import warn
from collections import namedtuple
from copy import deepcopy
from itertools import izip
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
from .preprocessing import Clip
from .stats import TotalEnergy, GoodSections, DropoutRate
from .stats.totalenergyresults import TotalEnergyResults
from .hashable import Hashable
from .appliance import Appliance
from .datastore import Key
from .measurement import (select_best_ac_type, AC_TYPES, PHYSICAL_QUANTITIES,
PHYSICAL_QUANTITIES_WITH_AC_TYPES,
check_ac_type, check_physical_quantity)
from .node import Node
from .electric import Electric
from .timeframe import TimeFrame, list_of_timeframe_dicts
from nilmtk.exceptions import MeasurementError
from .utils import flatten_2d_list, capitalise_first_letter
from nilmtk.timeframegroup import TimeFrameGroup
import nilmtk
ElecMeterID = namedtuple('ElecMeterID', ['instance', 'building', 'dataset'])
class ElecMeter(Hashable, Electric):
"""Represents a physical electricity meter.
Attributes
----------
appliances : list of Appliance objects connected immediately downstream
of this meter. Will be [] if no appliances are connected directly
to this meter.
store : nilmtk.DataStore
key : string
key into nilmtk.DataStore to access data.
metadata : dict.
See http://nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#elecmeter
STATIC ATTRIBUTES
-----------------
meter_devices : dict, static class attribute
See http://nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#meterdevice
"""
meter_devices = {}
def __init__(self, store=None, metadata=None, meter_id=None):
# Store and check parameters
self.appliances = []
self.metadata = {} if metadata is None else metadata
assert isinstance(self.metadata, dict)
self.store = store
self.identifier = meter_id
# Insert self into nilmtk.global_meter_group
if self.identifier is not None:
assert isinstance(self.identifier, ElecMeterID)
if self not in nilmtk.global_meter_group.meters:
nilmtk.global_meter_group.meters.append(self)
@property
def key(self):
return self.metadata['data_location']
def instance(self):
return self._identifier_attr('instance')
def building(self):
return self._identifier_attr('building')
def dataset(self):
return self._identifier_attr('dataset')
@property
def name(self):
return self.metadata.get('name')
@name.setter
def name(self, value):
self.metadata['name'] = value
def _identifier_attr(self, attr):
if self.identifier is None:
return
else:
return getattr(self.identifier, attr)
def get_timeframe(self):
self._check_store()
return self.store.get_timeframe(key=self.key)
def _check_store(self):
if self.store is None:
raise RuntimeError("ElecMeter needs `store` attribute set to an"
" instance of a `nilmtk.DataStore` subclass")
def upstream_meter(self, raise_warning=True):
"""
Returns
-------
ElecMeterID of upstream meter or None if is site meter.
"""
if self.is_site_meter():
if raise_warning:
warn("There is no meter upstream of this meter '{}' because"
" it is a site meter.".format(self.identifier))
return
submeter_of = self.metadata.get('submeter_of')
# Sanity checks
if submeter_of is None:
raise ValueError(
"This meter has no 'submeter_of' metadata attribute.")
if submeter_of < 0:
raise ValueError("'submeter_of' must be >= 0.")
upstream_meter_in_building = self.metadata.get(
'upstream_meter_in_building')
if (upstream_meter_in_building is not None and
upstream_meter_in_building != self.identifier.building):
raise NotImplementedError(
"'upstream_meter_in_building' not implemented yet.")
id_of_upstream = ElecMeterID(instance=submeter_of,
building=self.identifier.building,
dataset=self.identifier.dataset)
upstream_meter = nilmtk.global_meter_group[id_of_upstream]
if upstream_meter is None:
warn("No upstream meter found for '{}'.".format(self.identifier))
return upstream_meter
@classmethod
def load_meter_devices(cls, store):
dataset_metadata = store.load_metadata('/')
ElecMeter.meter_devices.update(
dataset_metadata.get('meter_devices', {}))
def save(self, destination, key):
"""
Convert all relevant attributes to a dict to be
saved as metadata in destination at location specified
by key
"""
# destination.write_metadata(key, self.metadata)
# then save data
raise NotImplementedError
@property
def device(self):
"""
Returns
-------
dict describing the MeterDevice for this meter (sample period etc).
"""
device_model = self.metadata.get('device_model')
if device_model:
return deepcopy(ElecMeter.meter_devices[device_model])
else:
return {}
def sample_period(self):
device = self.device
if device:
return device['sample_period']
def is_site_meter(self):
return self.metadata.get('site_meter', False)
def dominant_appliance(self):
"""Tries to find the most dominant appliance on this meter,
and then returns that appliance object. Will return None
if there are no appliances on this meter.
"""
n_appliances = len(self.appliances)
if n_appliances == 0:
return
elif n_appliances == 1:
return self.appliances[0]
else:
for app in self.appliances:
if app.metadata.get('dominant_appliance'):
return app
            warn('Multiple appliances are associated with meter {}'
                 ' but none are marked as the dominant appliance. Hence'
                 ' returning the first appliance in the list.'.format(self),
                 RuntimeWarning)
return self.appliances[0]
def label(self, pretty=True):
"""Returns a string describing this meter.
Parameters
----------
pretty : boolean
If True then just return the type name of the dominant appliance
(without the instance number) or metadata['name'], with the
first letter capitalised.
Returns
-------
string : A label listing all the appliance types.
"""
if pretty:
return self._pretty_label()
meter_names = []
if self.is_site_meter():
meter_names.append('SITE METER')
elif "name" in self.metadata:
meter_names.append(self.metadata["name"])
else:
for appliance in self.appliances:
appliance_name = appliance.label()
if appliance.metadata.get('dominant_appliance'):
appliance_name = appliance_name.upper()
meter_names.append(appliance_name)
label = ", ".join(meter_names)
return label
def _pretty_label(self):
name = self.metadata.get("name")
if name:
label = name
elif self.is_site_meter():
label = 'Site meter'
elif self.dominant_appliance() is not None:
label = self.dominant_appliance().identifier.type
else:
meter_names = []
for appliance in self.appliances:
appliance_name = appliance.label()
if appliance.metadata.get('dominant_appliance'):
appliance_name = appliance_name.upper()
meter_names.append(appliance_name)
label = ", ".join(meter_names)
return label
label = capitalise_first_letter(label)
return label
def available_ac_types(self, physical_quantity):
"""Finds available alternating current types for a specific physical quantity.
Parameters
----------
physical_quantity : str or list of strings
Returns
-------
list of strings e.g. ['apparent', 'active']
"""
if isinstance(physical_quantity, list):
ac_types = [self.available_ac_types(pq) for pq in physical_quantity]
return list(set(flatten_2d_list(ac_types)))
if physical_quantity not in PHYSICAL_QUANTITIES:
raise ValueError("`physical_quantity` must by one of '{}', not '{}'"
.format(PHYSICAL_QUANTITIES, physical_quantity))
measurements = self.device['measurements']
return [m['type'] for m in measurements
if m['physical_quantity'] == physical_quantity
and 'type' in m]
def available_physical_quantities(self):
"""
Returns
-------
list of strings e.g. ['power', 'energy']
"""
measurements = self.device['measurements']
return list(set([m['physical_quantity'] for m in measurements]))
def available_columns(self):
"""
Returns
-------
list of 2-tuples of strings e.g. [('power', 'active')]
"""
measurements = self.device['measurements']
return list(set([(m['physical_quantity'], m.get('type', ''))
for m in measurements]))
def __repr__(self):
string = super(ElecMeter, self).__repr__()
# Now add list of appliances...
string = string[:-1] # remove last bracket
# Site meter
if self.metadata.get('site_meter'):
string += ', site_meter'
# Appliances
string += ', appliances={}'.format(self.appliances)
# METER ROOM
room = self.metadata.get('room')
if room:
string += ', room={}'.format(room)
string += ')'
return string
def matches(self, key):
"""
Parameters
----------
key : dict
Returns
-------
Bool
"""
if not key:
return True
if not isinstance(key, dict):
raise TypeError()
match = True
for k, v in key.iteritems():
if hasattr(self.identifier, k):
if getattr(self.identifier, k) != v:
match = False
elif k in self.metadata:
if self.metadata[k] != v:
match = False
elif k in self.device:
metadata_value = self.device[k]
if (isinstance(metadata_value, list) and
not isinstance(v, list)):
if v not in metadata_value:
match = False
elif metadata_value != v:
match = False
else:
raise KeyError("'{}' not a valid key.".format(k))
return match
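    # Illustrative usage sketch (assumption: `meter` is an ElecMeter from a
    # loaded dataset). `matches` returns True only if every item in the dict
    # agrees with this meter's identifier, metadata or device information:
    #
    #   meter.matches({'building': 1, 'sample_period': 6})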
def load(self, **kwargs):
"""Returns a generator of DataFrames loaded from the DataStore.
By default, `load` will load all available columns from the DataStore.
Specific columns can be selected in one or two mutually exclusive ways:
1. specify a list of column names using the `cols` parameter.
2. specify a `physical_quantity` and/or an `ac_type` parameter to ask
`load` to automatically select columns.
        If 'resample' is set to 'True' then the default behaviour is that
        gaps shorter than max_sample_period will be forward filled.
Parameters
---------------
physical_quantity : string or list of strings
e.g. 'power' or 'voltage' or 'energy' or ['power', 'energy'].
If a single string then load columns only for that physical quantity.
If a list of strings then load columns for all those physical
quantities.
ac_type : string or list of strings, defaults to None
Where 'ac_type' is short for 'alternating current type'. e.g.
'reactive' or 'active' or 'apparent'.
If set to None then will load all AC types per physical quantity.
If set to 'best' then load the single best AC type per
physical quantity.
If set to a single AC type then load just that single AC type per
physical quantity, else raise an Exception.
If set to a list of AC type strings then will load all those
AC types and will raise an Exception if any cannot be found.
cols : list of tuples, using NILMTK's vocabulary for measurements.
e.g. [('power', 'active'), ('voltage', ''), ('energy', 'reactive')]
`cols` can't be used if `ac_type` and/or `physical_quantity` are set.
sample_period : int, defaults to None
Number of seconds to use as the new sample period for resampling.
If None then will use self.sample_period()
resample : boolean, defaults to False
If True then will resample data using `sample_period`.
Defaults to True if `sample_period` is not None.
        resample_kwargs : dict of key word arguments (other than 'rule') to
            pass to `pd.DataFrame.resample()`. Defaults to setting 'limit' to
            `max_sample_period / sample_period` and 'fill_method' to 'ffill'.
preprocessing : list of Node subclass instances
e.g. [Clip()].
**kwargs : any other key word arguments to pass to `self.store.load()`
Returns
-------
Always return a generator of DataFrames (even if it only has a single
column).
Raises
------
nilmtk.exceptions.MeasurementError if a measurement is specified
which is not available.
"""
verbose = kwargs.get('verbose')
if verbose:
print()
print("ElecMeter.load")
print(self)
if 'sample_period' in kwargs:
kwargs['resample'] = True
if kwargs.get('resample'):
# Set default key word arguments for resampling.
resample_kwargs = kwargs.setdefault('resample_kwargs', {})
resample_kwargs.setdefault('fill_method', 'ffill')
if 'limit' not in resample_kwargs:
sample_period = kwargs.get('sample_period', self.sample_period())
max_number_of_rows_to_ffill = int(
np.ceil(self.device['max_sample_period'] / sample_period))
resample_kwargs.update({'limit': max_number_of_rows_to_ffill})
if verbose:
print("kwargs after setting resample setting:")
print(kwargs)
kwargs = self._prep_kwargs_for_sample_period_and_resample(**kwargs)
if verbose:
print("kwargs after processing")
print(kwargs)
# Get source node
preprocessing = kwargs.pop('preprocessing', [])
last_node = self.get_source_node(**kwargs)
generator = last_node.generator
# Connect together all preprocessing nodes
for node in preprocessing:
node.upstream = last_node
last_node = node
generator = last_node.process()
return generator
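    # Illustrative usage sketch (assumption: `meter` is an ElecMeter obtained
    # from a loaded dataset, e.g. ``DataSet('redd.h5').buildings[1].elec[2]``;
    # the file name is hypothetical). Each yielded chunk is a DataFrame whose
    # columns are (physical_quantity, ac_type) tuples:
    #
    #   for chunk in meter.load(physical_quantity='power', ac_type='best',
    #                           sample_period=60):
    #       print(chunk.head())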
def _ac_type_to_columns(self, ac_type):
if ac_type is None:
return []
if isinstance(ac_type, list):
cols2d = [self._ac_type_to_columns(a_t) for a_t in ac_type]
return list(set(flatten_2d_list(cols2d)))
check_ac_type(ac_type)
cols_matching = [col for col in self.available_columns()
if col[1] == ac_type]
return cols_matching
def _physical_quantity_to_columns(self, physical_quantity):
if physical_quantity is None:
return []
if isinstance(physical_quantity, list):
cols2d = [self._physical_quantity_to_columns(p_q)
for p_q in physical_quantity]
return list(set(flatten_2d_list(cols2d)))
check_physical_quantity(physical_quantity)
cols_matching = [col for col in self.available_columns()
if col[0] == physical_quantity]
return cols_matching
def _get_columns_with_best_ac_type(self, physical_quantity=None):
if physical_quantity is None:
physical_quantity = self.available_physical_quantities()
if isinstance(physical_quantity, list):
columns = set()
for pq in physical_quantity:
best = self._get_columns_with_best_ac_type(pq)
if best:
columns.update(best)
return list(columns)
check_physical_quantity(physical_quantity)
available_pqs = self.available_physical_quantities()
if physical_quantity not in available_pqs:
return []
ac_types = self.available_ac_types(physical_quantity)
try:
best_ac_type = select_best_ac_type(ac_types)
except KeyError:
return []
else:
return [(physical_quantity, best_ac_type)]
def _convert_physical_quantity_and_ac_type_to_cols(
self, physical_quantity=None, ac_type=None, cols=None,
**kwargs):
"""Returns kwargs dict with physical_quantity and ac_type removed
and cols populated appropriately."""
if cols:
if (ac_type or physical_quantity):
raise ValueError("Cannot use `ac_type` and/or `physical_quantity`"
" with `cols` parameter.")
else:
if set(cols).issubset(self.available_columns()):
kwargs['cols'] = cols
return kwargs
else:
msg = ("'{}' is not a subset of the available columns: '{}'"
.format(cols, self.available_columns()))
raise MeasurementError(msg)
msg = ""
if not (ac_type or physical_quantity):
cols = self.available_columns()
elif ac_type == 'best':
cols = self._get_columns_with_best_ac_type(physical_quantity)
if not cols:
msg += "No AC types for physical quantity {}".format(physical_quantity)
else:
if ac_type:
cols = self._ac_type_to_columns(ac_type)
if not cols:
msg += "AC type '{}' not available. ".format(ac_type)
if physical_quantity:
cols_matching_pq = self._physical_quantity_to_columns(physical_quantity)
if not cols_matching_pq:
msg += ("Physical quantity '{}' not available. "
.format(physical_quantity))
if cols:
cols = list(set(cols).intersection(cols_matching_pq))
if not cols:
msg += ("No measurement matching ({}, {}). "
.format(physical_quantity, ac_type))
else:
cols = cols_matching_pq
if msg:
msg += "Available columns = {}. ".format(self.available_columns())
raise MeasurementError(msg)
kwargs['cols'] = cols
return kwargs
def dry_run_metadata(self):
return self.metadata
def get_metadata(self):
return self.metadata
def get_source_node(self, **loader_kwargs):
if self.store is None:
raise RuntimeError(
"Cannot get source node if meter.store is None!")
loader_kwargs = self._convert_physical_quantity_and_ac_type_to_cols(**loader_kwargs)
generator = self.store.load(key=self.key, **loader_kwargs)
self.metadata['device'] = self.device
return Node(self, generator=generator)
def total_energy(self, **loader_kwargs):
"""
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
if `full_results` is True then return TotalEnergyResults object
else returns a pd.Series with a row for each AC type.
"""
nodes = [Clip, TotalEnergy]
return self._get_stat_from_cache_or_compute(
nodes, TotalEnergy.results_class(), loader_kwargs)
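    # Illustrative usage sketch (assumption: `meter` is an ElecMeter from a
    # loaded dataset):
    #
    #   energy = meter.total_energy()                 # pd.Series keyed by AC type
    #   full = meter.total_energy(full_results=True)  # TotalEnergyResults object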
def dropout_rate(self, ignore_gaps=True, **loader_kwargs):
"""
Parameters
----------
ignore_gaps : bool, default=True
If True then will only calculate dropout rate for good sections.
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
DropoutRateResults object if `full_results` is True,
else float
"""
nodes = [DropoutRate]
if ignore_gaps:
loader_kwargs['sections'] = self.good_sections(**loader_kwargs)
return self._get_stat_from_cache_or_compute(
nodes, DropoutRate.results_class(), loader_kwargs)
def good_sections(self, **loader_kwargs):
"""
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
if `full_results` is True then return nilmtk.stats.GoodSectionsResults
object otherwise return list of TimeFrame objects.
"""
loader_kwargs.setdefault('n_look_ahead_rows', 10)
nodes = [GoodSections]
results_obj = GoodSections.results_class(self.device['max_sample_period'])
return self._get_stat_from_cache_or_compute(
nodes, results_obj, loader_kwargs)
def _get_stat_from_cache_or_compute(self, nodes, results_obj, loader_kwargs):
"""General function for computing statistics and/or loading them from
cache.
Cached statistics lives in the DataStore at
'building<I>/elec/cache/meter<K>/<statistic_name>' e.g.
'building1/elec/cache/meter1/total_energy'. We store the
'full' statistic... i.e we store a representation of the `Results._data`
DataFrame. Some times we need to do some conversion to store
`Results._data` on disk. The logic for doing this conversion lives
in the `Results` class or subclass. The cache can be cleared by calling
`ElecMeter.clear_cache()`.
Parameters
----------
nodes : list of nilmtk.Node classes
results_obj : instance of nilmtk.Results subclass
loader_kwargs : dict
Returns
-------
if `full_results` is True then return nilmtk.Results subclass
instance otherwise return nilmtk.Results.simple().
See Also
--------
clear_cache
_compute_stat
key_for_cached_stat
get_cached_stat
"""
full_results = loader_kwargs.pop('full_results', False)
verbose = loader_kwargs.get('verbose')
if 'ac_type' in loader_kwargs or 'physical_quantity' in loader_kwargs:
loader_kwargs = self._convert_physical_quantity_and_ac_type_to_cols(**loader_kwargs)
cols = loader_kwargs.get('cols', [])
ac_types = set([m[1] for m in cols if m[1]])
results_obj_copy = deepcopy(results_obj)
# Prepare `sections` list
sections = loader_kwargs.get('sections')
if sections is None:
tf = self.get_timeframe()
tf.include_end = True
sections = [tf]
sections = TimeFrameGroup(sections)
sections = [s for s in sections if not s.empty]
# Retrieve usable stats from cache
key_for_cached_stat = self.key_for_cached_stat(results_obj.name)
if loader_kwargs.get('preprocessing') is None:
cached_stat = self.get_cached_stat(key_for_cached_stat)
results_obj.import_from_cache(cached_stat, sections)
def find_sections_to_compute():
# Get sections_to_compute
results_obj_timeframes = results_obj.timeframes()
sections_to_compute = set(sections) - set(results_obj_timeframes)
sections_to_compute = list(sections_to_compute)
sections_to_compute.sort()
return sections_to_compute
try:
ac_type_keys = results_obj.simple().keys()
except:
sections_to_compute = find_sections_to_compute()
else:
if ac_types.issubset(ac_type_keys):
sections_to_compute = find_sections_to_compute()
else:
sections_to_compute = sections
results_obj = results_obj_copy
else:
sections_to_compute = sections
if verbose and not results_obj._data.empty:
print("Using cached result.")
# If we get to here then we have to compute some stats
if sections_to_compute:
loader_kwargs['sections'] = sections_to_compute
computed_result = self._compute_stat(nodes, loader_kwargs)
# Merge cached results with newly computed
results_obj.update(computed_result.results)
# Save to disk newly computed stats
stat_for_store = computed_result.results.export_to_cache()
try:
self.store.append(key_for_cached_stat, stat_for_store)
except ValueError:
# the old table probably had different columns
self.store.remove(key_for_cached_stat)
self.store.put(key_for_cached_stat, results_obj.export_to_cache())
if full_results:
return results_obj
else:
res = results_obj.simple()
if ac_types:
try:
ac_type_keys = res.keys()
except:
return res
else:
return pd.Series(res[ac_types], index=ac_types)
else:
return res
def _compute_stat(self, nodes, loader_kwargs):
"""
Parameters
----------
nodes : list of nilmtk.Node subclass objects
loader_kwargs : dict
Returns
-------
Node subclass object
See Also
--------
clear_cache
_get_stat_from_cache_or_compute
key_for_cached_stat
get_cached_stat
"""
results = self.get_source_node(**loader_kwargs)
for node in nodes:
results = node(results)
results.run()
return results
def key_for_cached_stat(self, stat_name):
"""
Parameters
----------
stat_name : str
Returns
-------
key : str
See Also
--------
clear_cache
_compute_stat
_get_stat_from_cache_or_compute
get_cached_stat
"""
if isinstance(self.instance(), tuple):
meter_str = "_".join([str(i) for i in (self.instance())])
else:
meter_str = "{:d}".format(self.instance())
return ("building{:d}/elec/cache/meter{}/{:s}"
.format(self.building(), meter_str, stat_name))
def clear_cache(self, verbose=False):
"""
See Also
--------
_compute_stat
_get_stat_from_cache_or_compute
key_for_cached_stat
get_cached_stat
"""
if self.store is not None:
key_for_cache = self.key_for_cached_stat('')
try:
self.store.remove(key_for_cache)
except KeyError:
if verbose:
print("No existing cache for", key_for_cache)
else:
print("Removed", key_for_cache)
def get_cached_stat(self, key_for_stat):
"""
Parameters
----------
key_for_stat : str
Returns
-------
pd.DataFrame
See Also
--------
_compute_stat
_get_stat_from_cache_or_compute
key_for_cached_stat
clear_cache
"""
if self.store is None:
return pd.DataFrame()
try:
stat_from_cache = self.store[key_for_stat]
except KeyError:
return pd.DataFrame()
else:
return pd.DataFrame() if stat_from_cache is None else stat_from_cache
# def total_on_duration(self):
# """Return timedelta"""
# raise NotImplementedError
# def on_durations(self):
# raise NotImplementedError
# def activity_distribution(self, bin_size, timespan):
# raise NotImplementedError
# def on_off_events(self):
# use self.metadata.minimum_[off|on]_duration
# raise NotImplementedError
# def discrete_appliance_activations(self):
# """
# Return a Mask defining the start and end times of each appliance
# activation.
# """
# raise NotImplementedError
# def contiguous_sections(self):
# """retuns Mask object"""
# raise NotImplementedError
# def clean_and_export(self, destination_datastore):
# """Apply all cleaning configured in meter.cleaning and then export. Also identifies
# and records the locations of gaps. Also records metadata about exactly which
# cleaning steps have been executed and some summary results (e.g. the number of
# implausible values removed)"""
# raise NotImplementedError
| apache-2.0 |
Funtimezzhou/TradeBuildTools | Document/szse/Quantitative Trading/sat-ebook-and-full-source-20150618/algo-ebook-full-source-code-20150618/chapter14/performance.py | 5 | 1431 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# performance.py
from __future__ import print_function
import numpy as np
import pandas as pd
def create_sharpe_ratio(returns, periods=252):
"""
Create the Sharpe ratio for the strategy, based on a
benchmark of zero (i.e. no risk-free rate information).
Parameters:
returns - A pandas Series representing period percentage returns.
periods - Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc.
"""
return np.sqrt(periods) * (np.mean(returns)) / np.std(returns)
def create_drawdowns(pnl):
"""
Calculate the largest peak-to-trough drawdown of the PnL curve
as well as the duration of the drawdown. Requires that the
pnl_returns is a pandas Series.
Parameters:
pnl - A pandas Series representing period percentage returns.
Returns:
drawdown, duration - Highest peak-to-trough drawdown and duration.
"""
# Calculate the cumulative returns curve
# and set up the High Water Mark
hwm = [0]
# Create the drawdown and duration series
idx = pnl.index
drawdown = pd.Series(index = idx)
duration = pd.Series(index = idx)
# Loop over the index range
for t in range(1, len(idx)):
hwm.append(max(hwm[t-1], pnl[t]))
drawdown[t]= (hwm[t]-pnl[t])
duration[t]= (0 if drawdown[t] == 0 else duration[t-1]+1)
return drawdown, drawdown.max(), duration.max()
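# Illustrative usage sketch (not part of the original module): the synthetic
# returns series below is purely hypothetical and only demonstrates how the
# two helpers above are typically combined.
if __name__ == "__main__":
    # Generate a hypothetical daily returns series and its cumulative PnL curve
    returns = pd.Series(np.random.normal(0.0005, 0.01, 252))
    pnl = (1.0 + returns).cumprod()
    sharpe = create_sharpe_ratio(returns, periods=252)
    drawdown, max_dd, dd_duration = create_drawdowns(pnl)
    print("Sharpe: %0.2f, Max drawdown: %0.4f, Duration: %d periods"
          % (sharpe, max_dd, dd_duration))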
| gpl-3.0 |
adgon92/optimalization-project | src/plot/ploter.py | 1 | 1397 | __author__ = 'Przemek'
import numpy as np
import matplotlib.pyplot as plt
class Ploter:
def __init__(self):
pass
def plot_temperature(self, temperature, cooling_method, initial_temperature, numb_of_cycles):
plt.plot(temperature, 'b.-')
plt.xlabel('Number of cycles')
plt.ylabel('Temperature')
plt.text(0.7 * numb_of_cycles, max(temperature),
self._get_chart_description(cooling_method, initial_temperature, numb_of_cycles))
def plot(self, objectives, cooling_method, initial_temperature, numb_of_cycles):
plt.plot(objectives, 'r.-')
plt.xlabel('Number of cycles')
plt.ylabel('Quality of solution')
plt.text(0.7 * numb_of_cycles, max(objectives),
self._get_chart_description(cooling_method, initial_temperature, numb_of_cycles),
withdash=False)
@staticmethod
def _get_chart_description(cooling_method, initial_temperature, numb_of_cycles):
return 'Cooling method {}\nInitial temperature: {}\nNumb of cycles: {}'.format(cooling_method,
initial_temperature,
numb_of_cycles)
def save(self, path):
plt.savefig(path)
def show(self):
plt.show()
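# Illustrative usage sketch (not part of the original module); the objective
# values and annealing settings below are hypothetical.
#
#   objectives = [120, 95, 90, 87, 86]
#   ploter = Ploter()
#   ploter.plot(objectives, cooling_method='geometric',
#               initial_temperature=1000, numb_of_cycles=len(objectives))
#   ploter.save('solution_quality.png')
#   ploter.show()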
| gpl-2.0 |
idlead/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
pdamodaran/yellowbrick | yellowbrick/model_selection/validation_curve.py | 1 | 13903 | # yellowbrick.model_selection.validation_curve
# Implements a visual validation curve for a hyperparameter.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Sat Mar 31 06:27:28 2018 -0400
#
# ID: validation_curve.py [] [email protected] $
"""
Implements a visual validation curve for a hyperparameter.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from yellowbrick.base import ModelVisualizer
from yellowbrick.style import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
from sklearn.model_selection import validation_curve as sk_validation_curve
##########################################################################
## ValidationCurve visualizer
##########################################################################
class ValidationCurve(ModelVisualizer):
"""
Visualizes the validation curve for both test and training data for a
range of values for a single hyperparameter of the model. Adjusting the
value of a hyperparameter adjusts the complexity of a model. Less complex
models suffer from increased error due to bias, while more complex models
suffer from increased error due to variance. By inspecting the training
and cross-validated test score error, it is possible to estimate a good
value for a hyperparameter that balances the bias/variance trade-off.
The visualizer evaluates cross-validated training and test scores for the
different hyperparameters supplied. The curve is plotted so that the
x-axis is the value of the hyperparameter and the y-axis is the model
score. This is similar to a grid search with a single hyperparameter.
The cross-validation generator splits the dataset k times, and scores are
averaged over all k runs for the training and test subsets. The curve
plots the mean score, and the filled in area suggests the variability of
cross-validation by plotting one standard deviation above and below the
mean for each split.
Parameters
----------
model : a scikit-learn estimator
An object that implements ``fit`` and ``predict``, can be a
classifier, regressor, or clusterer so long as there is also a valid
associated scoring metric.
Note that the object is cloned for each validation.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
logx : boolean, optional
If True, plots the x-axis with a logarithmic scale.
groups : array-like, with shape (n_samples,)
Optional group labels for the samples used while splitting the dataset
into train/test sets.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
train_scores_ : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
train_scores_mean_ : array, shape (n_ticks,)
Mean training data scores for each training split
train_scores_std_ : array, shape (n_ticks,)
Standard deviation of training data scores for each training split
test_scores_ : array, shape (n_ticks, n_cv_folds)
Scores on test set.
test_scores_mean_ : array, shape (n_ticks,)
Mean test data scores for each test split
test_scores_std_ : array, shape (n_ticks,)
Standard deviation of test data scores for each test split
Examples
--------
>>> import numpy as np
>>> from yellowbrick.model_selection import ValidationCurve
>>> from sklearn.svm import SVC
>>> pr = np.logspace(-6,-1,5)
>>> model = ValidationCurve(SVC(), param_name="gamma", param_range=pr)
>>> model.fit(X, y)
>>> model.poof()
Notes
-----
This visualizer is essentially a wrapper for the
    ``sklearn.model_selection.validation_curve`` utility, discussed in the
`validation curves <http://scikit-learn.org/stable/modules/learning_curve.html#validation-curve>`_
documentation.
.. seealso:: The documentation for the
`validation_curve <http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.validation_curve.html#sklearn.model_selection.validation_curve>`_
function, which this visualizer wraps.
"""
def __init__(self, model, param_name, param_range, ax=None, logx=False,
groups=None, cv=None, scoring=None, n_jobs=1,
pre_dispatch="all", **kwargs):
# Initialize the model visualizer
super(ValidationCurve, self).__init__(model, ax=ax, **kwargs)
# Validate the param_range
param_range = np.asarray(param_range)
if param_range.ndim != 1:
raise YellowbrickValueError(
"must specify array of param values, '{}' is not valid".format(
repr(param_range)
))
# Set the visual and validation curve parameters on the estimator
self.set_params(
param_name=param_name, param_range=param_range, logx=logx,
groups=groups, cv=cv, scoring=scoring, n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
)
def fit(self, X, y=None):
"""
Fits the validation curve with the wrapped estimator and parameter
array to the specified data. Draws training and test score curves and
saves the scores to the visualizer.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
self : instance
Returns the instance of the validation curve visualizer for use in
pipelines and other sequential transformers.
"""
# arguments to pass to sk_validation_curve
skvc_kwargs = {
key: self.get_params()[key]
for key in (
'param_name', 'param_range', 'groups', 'cv', 'scoring',
'n_jobs', 'pre_dispatch',
)
}
# compute the validation curve and store scores
curve = sk_validation_curve(self.estimator, X, y, **skvc_kwargs)
self.train_scores_, self.test_scores_ = curve
# compute the mean and standard deviation of the training data
self.train_scores_mean_ = np.mean(self.train_scores_, axis=1)
self.train_scores_std_ = np.std(self.train_scores_, axis=1)
# compute the mean and standard deviation of the test data
self.test_scores_mean_ = np.mean(self.test_scores_, axis=1)
self.test_scores_std_ = np.std(self.test_scores_, axis=1)
# draw the curves on the current axes
self.draw()
return self
def draw(self, **kwargs):
"""
Renders the training and test curves.
"""
# Specify the curves to draw and their labels
labels = ("Training Score", "Cross Validation Score")
curves = (
(self.train_scores_mean_, self.train_scores_std_),
(self.test_scores_mean_, self.test_scores_std_),
)
# Get the colors for the train and test curves
colors = resolve_colors(n_colors=2)
# Plot the fill betweens first so they are behind the curves.
for idx, (mean, std) in enumerate(curves):
# Plot one standard deviation above and below the mean
self.ax.fill_between(
self.param_range, mean - std, mean+std, alpha=0.25,
color=colors[idx],
)
# Plot the mean curves so they are in front of the variance fill
for idx, (mean, _) in enumerate(curves):
self.ax.plot(
self.param_range, mean, 'd-', color=colors[idx],
label=labels[idx],
)
if self.logx:
self.ax.set_xscale('log')
return self.ax
def finalize(self, **kwargs):
"""
Add the title, legend, and other visual final touches to the plot.
"""
# Set the title of the figure
self.set_title('Validation Curve for {}'.format(self.name))
# Add the legend
self.ax.legend(frameon=True, loc='best')
# Set the axis labels
self.ax.set_xlabel(self.param_name)
self.ax.set_ylabel('score')
##########################################################################
## Quick Method
##########################################################################
def validation_curve(model, X, y, param_name, param_range, ax=None, logx=False,
groups=None, cv=None, scoring=None, n_jobs=1,
pre_dispatch="all", **kwargs):
"""
Displays a validation curve for the specified param and values, plotting
both the train and cross-validated test scores. The validation curve is a
visual, single-parameter grid search used to tune a model to find the best
balance between error due to bias and error due to variance.
This helper function wraps the ValidationCurve visualizer for a fast,
visual analysis.
Parameters
----------
model : a scikit-learn estimator
An object that implements ``fit`` and ``predict``, can be a
classifier, regressor, or clusterer so long as there is also a valid
associated scoring metric.
Note that the object is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
logx : boolean, optional
If True, plots the x-axis with a logarithmic scale.
groups : array-like, with shape (n_samples,)
Optional group labels for the samples used while splitting the dataset
into train/test sets.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds,
- an object to be used as a cross-validation generator,
- an iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers. These arguments are
also passed to the `poof()` method, e.g. you can pass a path to save the
figure to.
Returns
-------
ax : matplotlib.Axes
The axes object that the validation curves were drawn on.
"""
# Initialize the visualizer
oz = ValidationCurve(
model, param_name, param_range, ax=ax, logx=logx, groups=groups,
cv=cv, scoring=scoring, n_jobs=n_jobs, pre_dispatch=pre_dispatch
)
# Fit and poof the visualizer
oz.fit(X, y)
oz.poof(**kwargs)
return oz.ax
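# --- Illustrative usage (editor's sketch, not part of the library source) ---
# A minimal example of the quick method above. The digits dataset and SVC are
# arbitrary choices for illustration; any estimator/dataset pair would do.
if __name__ == '__main__':
    import numpy as np
    from sklearn.datasets import load_digits
    from sklearn.svm import SVC
    digits = load_digits()
    gamma_range = np.logspace(-6, -1, 5)
    # Draws the training and cross-validation score curves and shows the plot.
    validation_curve(
        SVC(), digits.data, digits.target,
        param_name="gamma", param_range=gamma_range, logx=True, cv=3,
    )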
| apache-2.0 |
cainiaocome/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
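# --- Illustrative check (editor's sketch, not part of scikit-learn) ---
# Constant features yield a zero standard deviation; the helper above resets
# those entries to 1.0 so that the later division leaves them unchanged.
if __name__ == '__main__':
    _demo_scale = _handle_zeros_in_scale(np.array([0.0, 2.0, 0.0]))
    assert np.array_equal(_demo_scale, np.array([1.0, 2.0, 1.0]))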
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
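# --- Illustrative usage (editor's sketch, not part of scikit-learn) ---
# Column-wise standardization with the function above: every feature of the
# result has (approximately) zero mean and unit variance. The matrix is an
# arbitrary toy example.
if __name__ == '__main__':
    _X_demo = np.array([[1., -1., 2.],
                        [2., 0., 0.],
                        [0., 1., -1.]])
    _X_scaled = scale(_X_demo)
    assert np.allclose(_X_scaled.mean(axis=0), 0.0)
    assert np.allclose(_X_scaled.std(axis=0), 1.0)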
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
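# --- Illustrative usage (editor's sketch, not part of scikit-learn) ---
# Fitting the scaler above on training data maps each training column onto
# the requested range; data outside the training range can fall outside it.
# The values are arbitrary.
if __name__ == '__main__':
    _train = np.array([[1., 10.], [2., 20.], [3., 30.]])
    _mms = MinMaxScaler(feature_range=(0, 1)).fit(_train)
    assert np.allclose(_mms.transform(_train).min(axis=0), 0.0)
    assert np.allclose(_mms.transform(_train).max(axis=0), 1.0)
    _out = _mms.transform(np.array([[4., 5.]]))
    assert _out[0, 0] > 1.0 and _out[0, 1] < 0.0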
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
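# --- Illustrative usage (editor's sketch, not part of scikit-learn) ---
# The estimator above learns mean_ and std_ during fit, applies them in
# transform, and inverse_transform undoes the operation (toy values only).
if __name__ == '__main__':
    _X_std_demo = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
    _ss = StandardScaler().fit(_X_std_demo)
    assert np.allclose(_ss.mean_, [0.5, 0.5])
    _Xt = _ss.transform(_X_std_demo)
    assert np.allclose(_ss.inverse_transform(_Xt), _X_std_demo)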
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
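# --- Illustrative usage (editor's sketch, not part of scikit-learn) ---
# The scaler above divides each feature by its maximum absolute value, so the
# transformed features lie in [-1, 1] and zeros stay zero (toy values only).
if __name__ == '__main__':
    _X_abs_demo = np.array([[1., -2.], [2., 4.], [-4., 1.]])
    _mas = MaxAbsScaler().fit(_X_abs_demo)
    assert np.allclose(_mas.scale_, [4., 4.])
    assert np.allclose(np.abs(_mas.transform(_X_abs_demo)).max(axis=0), 1.0)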
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
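# --- Illustrative usage (editor's sketch, not part of scikit-learn) ---
# The scaler above centers on the median and scales by the interquartile
# range, so a single outlier barely affects the learned statistics.
if __name__ == '__main__':
    _X_rob_demo = np.array([[1.], [2.], [3.], [4.], [100.]])  # 100. is an outlier
    _rs = RobustScaler().fit(_X_rob_demo)
    assert np.allclose(_rs.center_, [3.])   # the median, not the mean
    assert np.allclose(_rs.scale_, [2.])    # 75th minus 25th percentile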
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
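# --- Illustrative usage (editor's sketch, not part of scikit-learn) ---
# Degree-2 expansion with the transformer above: for an input row [a, b] the
# output columns are [1, a, b, a^2, a*b, b^2], matching the class docstring.
if __name__ == '__main__':
    _X_poly_demo = np.arange(6).reshape(3, 2)
    _poly = PolynomialFeatures(degree=2).fit(_X_poly_demo)
    assert _poly.n_output_features_ == 6
    assert _poly.transform(_X_poly_demo).shape == (3, 6)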
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
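# --- Illustrative usage (editor's sketch, not part of scikit-learn) ---
# Row-wise l2 normalization with the function above: every non-zero row of
# the result has unit Euclidean length, and all-zero rows are left unchanged.
if __name__ == '__main__':
    _X_norm_demo = np.array([[3., 4.], [0., 0.]])
    _X_normed = normalize(_X_norm_demo, norm='l2')
    assert np.allclose(_X_normed[0], [0.6, 0.8])
    assert np.allclose(_X_normed[1], [0.0, 0.0])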
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
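# --- Illustrative usage (editor's sketch, not part of scikit-learn) ---
# Thresholding with the function above: values strictly greater than the
# threshold become 1, everything else becomes 0 (toy values only).
if __name__ == '__main__':
    _X_bin_demo = np.array([[0.5, -1.2, 2.0]])
    assert np.array_equal(binarize(_X_bin_demo, threshold=1.0), [[0., 0., 1.]])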
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
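# --- Illustrative check (editor's sketch, not part of scikit-learn) ---
# For a linear kernel K = X X^T, centering K with the class above is the same
# as centering the features first and then computing the kernel.
if __name__ == '__main__':
    _X_kc_demo = np.random.RandomState(0).rand(5, 3)
    _K = _X_kc_demo.dot(_X_kc_demo.T)
    _Kc = KernelCenterer().fit(_K).transform(_K)
    _Xc = _X_kc_demo - _X_kc_demo.mean(axis=0)
    assert np.allclose(_Kc, _Xc.dot(_Xc.T))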
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known from fit,
# i.e. less than n_values_, using a mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
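# --- Illustrative usage (editor's sketch, not part of scikit-learn) ---
# Encoding with the class above, reusing the values from the docstring
# example: three integer features expand into 2 + 3 + 4 = 9 indicator columns
# and each transformed row has exactly one active indicator per feature.
if __name__ == '__main__':
    _enc = OneHotEncoder(sparse=False)
    _enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]])
    _row = _enc.transform([[0, 1, 1]])
    assert _row.shape == (1, 9)
    assert _row.sum() == 3.0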
| bsd-3-clause |
allisony/pyspeckit | ah_bootstrap.py | 16 | 35044 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``; the presence of
that section, and options therein, determine the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken (however,
``ah_bootstrap.use_astropy_helpers`` may be called manually from within the
setup.py script).
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
if sys.version_info[0] < 3:
_str_types = (str, unicode)
_text_type = unicode
PY3 = False
else:
_str_types = (str, bytes)
_text_type = str
PY3 = True
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_bootstrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
import typing # noqa
except ImportError:
pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat # noqa
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
# Ignore if this fails for *any* reason
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if PY3 and not isinstance(path, _text_type):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST". However, in other cases it gets a precedence of
# "EGG_DIST". However, when activing the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
        # Otherwise we found a version of astropy-helpers, so we're done.
        # Just activate the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
        Returns the loaded Distribution object on success, or `None` if no
        usable copy of the package was found at the given path.
        """
        # Returns a Distribution on success and None on failure, in which case
        # run() falls back to the next strategy (local file or package index)
path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
        # pkg_resources.working_set, since on older versions of setuptools this
        # will raise a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
with _silence():
run_setup(os.path.join(path, 'setup.py'),
['egg_info'])
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
req = DIST_NAME
attrs = {'setup_requires': [req]}
try:
if DEBUG:
_Distribution(attrs=attrs)
else:
with _silence():
_Distribution(attrs=attrs)
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
# Build up a requirement for a higher bugfix release but a lower minor
# release (so API compatibility is guaranteed)
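        # For example, if the bundled copy is version 1.4.2 this builds the
        # requirement 'astropy-helpers>1.4.2,<1.5.0' (version numbers
        # illustrative), so a later 1.4.x bugfix release can be picked up but
        # 1.5.0 cannot.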
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
        This function makes calls to the ``git`` command in subprocesses. The
        ``_check_submodule_no_git`` fallback, by contrast, uses pure Python to
        check whether the given path looks like a git submodule, but it cannot
        perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning that occurs in perl (from running git submodule)
# which only occurs with a malformatted locale setting which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
# 1: Status indicator: '-' for submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
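        # A typical line for an up-to-date, initialized submodule would look
        # like (illustrative; <sha1> stands in for the full 40-character hash):
        #
        #     " <sha1> astropy_helpers (heads/master)"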
        _git_submodule_status_re = re.compile(
            r'^(?P<status>[ +\-U])(?P<commit>[0-9a-f]{40}) '
            r'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
                if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
    # Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, _text_type):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, _text_type):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
        If `True`, disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule. Defaults to `False`.
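    Examples
    --------
    A typical project ``setup.py`` might call this right after importing the
    module (illustrative sketch; the keyword values depend on how the project
    bundles astropy-helpers)::

        import ah_bootstrap
        ah_bootstrap.use_astropy_helpers(path='astropy_helpers',
                                         download_if_needed=True)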
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
| mit |
stinebuu/nest-simulator | pynest/examples/clopath_synapse_spike_pairing.py | 12 | 5804 | # -*- coding: utf-8 -*-
#
# clopath_synapse_spike_pairing.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Spike pairing experiment
----------------------------------------
This script simulates one ``aeif_psc_delta_clopath`` neuron that is connected with
a Clopath connection [1]_. The synapse receives pairs of a pre- and a
postsynaptic spike that are separated by either 10 ms (pre before post) or
-10 ms (post before pre). The change of the synaptic weight is measured after
five such pairs. This experiment is repeated five times with different
repetition rates of the spike pairs: 10 Hz, 20 Hz, 30 Hz, 40 Hz, and 50 Hz.
References
~~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import numpy as np
import matplotlib.pyplot as plt
import nest
##############################################################################
# First we specify the neuron parameters. To enable voltage dependent
# prefactor ``A_LTD(u_bar_bar)`` add ``A_LTD_const: False`` to the dictionary.
nrn_params = {'V_m': -70.6,
'E_L': -70.6,
'C_m': 281.0,
'theta_minus': -70.6,
'theta_plus': -45.3,
'A_LTD': 14.0e-5,
'A_LTP': 8.0e-5,
'tau_minus': 10.0,
'tau_plus': 7.0,
'delay_u_bars': 4.0,
'a': 4.0,
'b': 0.0805,
'V_reset': -70.6 + 21.0,
'V_clamp': 33.0,
't_clamp': 2.0,
't_ref': 0.0,
}
##############################################################################
# Hardcoded spike times of presynaptic spike generator
spike_times_pre = [
# Presynaptic spike before the postsynaptic
[20., 120., 220., 320., 420.],
[20., 70., 120., 170., 220.],
[20., 53.3, 86.7, 120., 153.3],
[20., 45., 70., 95., 120.],
[20., 40., 60., 80., 100.],
# Presynaptic spike after the postsynaptic
[120., 220., 320., 420., 520., 620.],
[70., 120., 170., 220., 270., 320.],
[53.3, 86.6, 120., 153.3, 186.6, 220.],
[45., 70., 95., 120., 145., 170.],
[40., 60., 80., 100., 120., 140.]]
##############################################################################
# Hardcoded spike times of postsynaptic spike generator
spike_times_post = [
[10., 110., 210., 310., 410.],
[10., 60., 110., 160., 210.],
[10., 43.3, 76.7, 110., 143.3],
[10., 35., 60., 85., 110.],
[10., 30., 50., 70., 90.],
[130., 230., 330., 430., 530., 630.],
[80., 130., 180., 230., 280., 330.],
[63.3, 96.6, 130., 163.3, 196.6, 230.],
[55., 80., 105., 130., 155., 180.],
[50., 70., 90., 110., 130., 150.]]
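# Note: within each list above the pair repetition interval encodes the rate
# (100, 50, 33.3, 25 and 20 ms, i.e. 10, 20, 30, 40 and 50 Hz), and the pre-
# and postsynaptic spikes within a pair are offset by 10 ms as described in
# the module docstring.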
init_w = 0.5
syn_weights = []
resolution = 0.1
##############################################################################
# Loop over pairs of spike trains
for (s_t_pre, s_t_post) in zip(spike_times_pre, spike_times_post):
nest.ResetKernel()
nest.SetKernelStatus({"resolution": resolution})
# Create one neuron
nrn = nest.Create("aeif_psc_delta_clopath", 1, nrn_params)
# We need a parrot neuron since spike generators can only
# be connected with static connections
prrt_nrn = nest.Create("parrot_neuron", 1)
# Create and connect spike generators
spike_gen_pre = nest.Create("spike_generator", 1, {
"spike_times": s_t_pre})
nest.Connect(spike_gen_pre, prrt_nrn,
syn_spec={"delay": resolution})
spike_gen_post = nest.Create("spike_generator", 1, {
"spike_times": s_t_post})
nest.Connect(spike_gen_post, nrn, syn_spec={
"delay": resolution, "weight": 80.0})
# Create weight recorder
wr = nest.Create('weight_recorder', 1)
# Create Clopath connection with weight recorder
nest.CopyModel("clopath_synapse", "clopath_synapse_rec",
{"weight_recorder": wr})
syn_dict = {"synapse_model": "clopath_synapse_rec",
"weight": init_w, "delay": resolution}
nest.Connect(prrt_nrn, nrn, syn_spec=syn_dict)
# Simulation
simulation_time = (10.0 + max(s_t_pre[-1], s_t_post[-1]))
nest.Simulate(simulation_time)
# Extract and save synaptic weights
weights = wr.get("events", "weights")
syn_weights.append(weights[-1])
syn_weights = np.array(syn_weights)
# scaling of the weights so that they are comparable to [1]
syn_weights = 100.0*15.0*(syn_weights - init_w)/init_w + 100.0
# Plot results
fig1, axA = plt.subplots(1, sharex=False)
axA.plot([10., 20., 30., 40., 50.], syn_weights[5:], color='b', lw=2.5, ls='-',
label="pre-post pairing")
axA.plot([10., 20., 30., 40., 50.], syn_weights[:5], color='g', lw=2.5, ls='-',
label="post-pre pairing")
axA.set_ylabel("normalized weight change")
axA.set_xlabel("rho (Hz)")
axA.legend()
axA.set_title("synaptic weight")
plt.show()
| gpl-2.0 |
natj/bender | runs/out/reds.py | 1 | 3348 | import numpy as np
import matplotlib as mpl
from pylab import *
from matplotlib import cm
from matplotlib.colors import LogNorm
mpl.rcParams['image.cmap'] = 'inferno'
mpl.rc('font', family='serif')
mpl.rc('xtick', labelsize='small')
mpl.rc('ytick', labelsize='small')
gs = GridSpec(1, 3)
gs.update(hspace = 0.3)
#Construct output xy image plane from img object
##################################################
x_span = 11.0
y_span = 11.0
x_bins = 500
y_bins = 500
xs = np.linspace(-x_span, x_span, x_bins)
ys = np.linspace(-y_span, y_span, y_bins)
##################################################
# plot values on image plane
def trans(mat):
return np.flipud(mat.T)
#return mat
def detrans(mat):
return np.flipud(mat).T
def clean_image(mat):
#mask all 0.0 elements and transpose
mat_masked = np.ma.masked_where(mat == 0, mat)
return trans(mat_masked)
#read redshift array
fname = "reds_f600pbbr15m1.4i45.csv"
data = np.genfromtxt(fname, delimiter=',')
redshift = np.reshape(data, (x_bins, y_bins) )
redshift = clean_image(redshift)
##################################################
fname2 = 'reds_f600_bb_r15_m1.4_i45.csv'
data2 = np.genfromtxt(fname2, delimiter=',')
redshift2 = np.reshape(data2, (x_bins, y_bins) )
redshift2 = clean_image(redshift2)
# other settings for imshow
extent=( xs[0], xs[-1], ys[0], xs[-1] )
interpolation = 'nearest'
###################################################
ax = subplot(gs[0])
ax.minorticks_on()
cax = ax.imshow(redshift, interpolation=interpolation, origin='lower', extent=extent,
cmap=cm.get_cmap('coolwarm_r'))
ax.contour(redshift, 20, hold='on', colors='w',
origin='lower', extent=extent)
###################################################
ax = subplot(gs[1])
ax.minorticks_on()
cax = ax.imshow(redshift2, interpolation=interpolation, origin='lower', extent=extent,
cmap=cm.get_cmap('coolwarm_r'))
ax.contour(redshift2, 20, hold='on', colors='w',
origin='lower', extent=extent)
ax = subplot(gs[2])
ax.minorticks_on()
###################################################
# relative error
relerr = np.zeros(( x_bins, y_bins))
for i, x in enumerate(xs):
for j, y in enumerate(ys):
val1 = redshift[i,j]
val2 = redshift2[i,j]
errval = 0.0
if not(val2 == 0.0):
errval = np.abs( (val2 - val1)/val2 )
#errval = np.log10( np.abs((val2 - val1)/val2) )
relerr[i,j] = errval
relerr = np.ma.masked_where(relerr == 0, relerr)
#emin = -0.02
#emax = 0.02
print "min :",np.min(relerr)
print "max :",np.max(relerr)
#emin = -3.0
#emax = 1.0
emin = 1.0e-4
emax = 1.0e-1
cax = ax.imshow(relerr,
interpolation=interpolation,
origin='lower', extent=extent,
cmap=cm.get_cmap('inferno_r'),
norm=LogNorm(emin, emax)
#vmin = emin,
#vmax = emax,
)
levels = np.linspace(emin, emax, 10)
#levels = np.array( [1.0e-3, 5.0e-3, 1.0e-2, 5.0e-2, 1.0e-1, 5.0e-1, 1.0e0] )
#levels = np.array( [1.0e-2, 2.0e-2 ] )
levels = np.array( [1.0e-3, 5.0e-3] )
ax.contour(relerr,
levels,
hold='on',
        linestyles='dashed',
colors='r',
origin='lower',
extent=extent,
vmin = emin,
vmax = emax
)
colorbar(cax)
# save before show() so the figure is written out even after the GUI window
# is closed
savefig('reds.pdf')
show()
| mit |
AlexRobson/scikit-learn | sklearn/cluster/tests/test_k_means.py | 132 | 25860 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
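        # e.g. labels [1, 0, 2, 0, 0, 2] become [0, 1, 2, 1, 1, 2]: each label
        # is replaced by the position of its first occurrence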
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
    # Check that k_means with a bad initialization does not yield a singleton
    # Starting with bad centers that are quickly ignored should not
    # result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers which in turn makes the clustering
    # dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not been collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
| bsd-3-clause |
mchelem/cref2 | cref/app/terminal.py | 1 | 4737 | #!/usr/bin/env python
import os
import argparse
import logging
import importlib
import tempfile
import subprocess
import pandas
from Bio import SeqIO
from cref.app import BaseApp
from cref.libs import rcsb
logger = logging.getLogger('CReF')
class TerminalApp(BaseApp):
"""
App to be run on the terminal
"""
def reporter(self, state):
pass
def run_cref(aa_sequence, output_dir, params):
pandas.set_option('display.max_columns', 0)
pandas.set_option('display.max_rows', 5)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
app = TerminalApp(params)
return app.run(aa_sequence, output_dir)
def configure_logger(log_level='INFO', include_pathname=False):
logger = logging.getLogger('CReF')
level = getattr(logging, log_level.upper(), None)
if not isinstance(level, int):
raise ValueError('Invalid log level: %s' % log_level)
logger.propagate = False
logger = logging.getLogger('CReF')
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
if include_pathname:
template = ('%(asctime)s - %(name)s - %(levelname)s'
'(%(pathname)s, %(lineno)d)- %(message)s')
else:
template = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(template, datefmt='%d/%m/%Y %I:%M:%S %p')
ch.setFormatter(formatter)
logger.addHandler(ch)
def parse_args():
parser = argparse.ArgumentParser(
description='CReF: Protein structure prediction')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--sequence', dest='sequence',
help='Aminoacid sequence using one letter code',
)
group.add_argument(
'--fasta', dest='fasta',
help='File containing the fasta sequence',
)
group.add_argument(
'--pdb', dest='pdb',
help='PDB Code from where the sequence will be extracted',
)
parser.add_argument(
'--config', dest='config',
help='File specifying the configurations'
)
parser.add_argument(
'--output', dest='output_dir',
default='predictions/tmp',
help='Directory to save the results'
)
parser.add_argument(
'--log', dest='log_level',
default='INFO',
help='Log level to be used (DEBUG, INFO, WARN, ERROR)'
)
parser.add_argument(
'--pymol', dest='pymol', action='store_true',
help='View prediction in PyMOL'
)
return parser.parse_args()
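# Illustrative invocations (paths, module names and the PDB code are examples
# only):
#
#     python -m cref.app.terminal --sequence MKTAYIAKQR --output predictions/seq1
#     python -m cref.app.terminal --pdb 1ZDD --pymol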
def read_fasta(filepath):
records = []
with open(filepath, 'rU') as fasta_file:
records = list(SeqIO.parse(fasta_file, 'fasta'))
return records
def predict_fasta(filepath, output_dir, params):
sequences = read_fasta(filepath)
output_filepaths = []
for sequence in sequences:
seq = str(sequence.seq).replace('X', '')
        # Use a per-sequence directory so successive sequences do not end up
        # nested inside each other
        seq_output_dir = os.path.join(output_dir, sequence.id.split(':')[0] + '/')
        output = run_cref(seq, seq_output_dir, params)
        sequence_file = os.path.join(seq_output_dir, 'sequence.txt')
with open(sequence_file, 'w') as sequence_output:
sequence_output.write(seq)
output_filepaths.append(output)
return output_filepaths
def read_config(module):
try:
config = importlib.import_module(module)
except Exception as e:
logger.error(e)
raise Exception('Invalid config file')
return config
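# Note: the --config argument is treated as an importable module path (e.g. a
# hypothetical predictions.config), and the imported module is expected to
# define a `params` dict used below.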
def run_pymol(pdb_code, predicted_filepath):
filepath = os.path.join(
os.path.dirname(predicted_filepath),
'experimental_structure.pdb'
)
experimental_pdb = rcsb.download_pdb(pdb_code, filepath)
subprocess.call([
'pymol',
predicted_filepath,
experimental_pdb,
'-r',
'cref/utils/pymol.py'
])
def main():
params = {}
args = parse_args()
configure_logger(args.log_level)
if args.config:
config = read_config(args.config)
params = config.params
# Sequence input
if args.sequence:
run_cref(args.sequence, args.output_dir, params)
# Fasta file input
elif args.fasta:
predict_fasta(args.fasta, args.output_dir, params)
# PDB code input
elif args.pdb:
handler, fasta_file = tempfile.mkstemp(suffix='.fasta', prefix='tmp')
rcsb.download_fasta(args.pdb, fasta_file)
params['pdb'] = args.pdb
output_files = predict_fasta(fasta_file, args.output_dir, params)
os.remove(fasta_file)
if args.pymol:
run_pymol(args.pdb, output_files[0])
else:
raise ValueError('You must specify a sequence, fasta file or pdb code')
if __name__ == '__main__':
main()
| mit |
ilius/hazm | data.py | 1 | 5329 | # coding: utf8
from __future__ import print_function, unicode_literals
import codecs, subprocess
from collections import Counter
from sklearn.cross_validation import train_test_split
from hazm import *
from hazm.Chunker import tree2brackets
def create_words_file(dic_file='resources/persian.dic', output='hazm/data/words.dat'):
""" prepares list of persian word words from [Virastyar](https://sourceforge.net/projects/virastyar/) dic file.
"""
dic_words = sorted([line.split('\t')[0] for line in codecs.open(dic_file, encoding='utf8')])
print(*dic_words, sep='\n', file=codecs.open(output, 'w', 'utf8'))
print(output, 'created')
def evaluate_lemmatizer(conll_file='resources/train.conll', peykare_root='corpora/peykare'):
lemmatizer = Lemmatizer()
errors = []
with codecs.open('resources/lemmatizer_errors.txt', 'w', 'utf8') as output:
dadegan = DadeganReader(conll_file)
for tree in dadegan.trees():
for node in tree.nodelist[1:]:
word, lemma, pos = node['word'], node['lemma'], node['mtag']
if lemmatizer.lemmatize(word, pos) != lemma:
errors.append((word, lemma, pos, lemmatizer.lemmatize(word, pos)))
print(len(errors), 'errors', file=output)
counter = Counter(errors)
for item, count in sorted(counter.items(), key=lambda t: t[1], reverse=True):
print(count, *item, file=output)
missed = []
with codecs.open('resources/lemmatizer_missed.txt', 'w', 'utf8') as output:
peykare = PeykareReader(peykare_root)
for sentence in peykare.sents():
for word in sentence:
if word[1] == 'V':
if word[0] == lemmatizer.lemmatize(word[0]):
missed.append(word[0])
print(len(missed), 'missed', file=output)
counter = Counter(missed)
for item, count in sorted(counter.items(), key=lambda t: t[1], reverse=True):
print(count, item, file=output)
def evaluate_chunker(treebank_root='corpora/treebank'):
treebank = TreebankReader(treebank_root, join_clitics=True, join_verb_parts=True)
chunker = Chunker()
chunked_trees = list(treebank.chunked_trees())
print(chunker.evaluate(chunked_trees))
output = codecs.open('resources/chunker_errors.txt', 'w', 'utf8')
for sentence, gold in zip(treebank.sents(), chunked_trees):
chunked = chunker.parse(sentence)
if chunked != gold:
print(tree2brackets(chunked), file=output)
print(tree2brackets(gold), file=output)
print(file=output)
def train_postagger(peykare_root='corpora/peykare', path_to_model='resources/persian.tagger', path_to_jar='resources/stanford-postagger.jar', properties_file='resources/stanford-postagger.props', memory_min='-Xms1g', memory_max='-Xmx6g', test_size=.1):
peykare = PeykareReader(peykare_root)
train_file = 'resources/tagger_train_data.txt'
train, test = train_test_split(list(peykare.sents()), test_size=float(test_size), random_state=0)
print('Peykare loaded.')
output = codecs.open(train_file, 'w', 'utf8')
for sentence in train:
print(*(map(lambda w: '/'.join(w).replace(' ', '_'), sentence)), file=output)
subprocess.Popen(['java', memory_min, memory_max, '-classpath', path_to_jar, 'edu.stanford.nlp.tagger.maxent.MaxentTagger', '-prop', properties_file, '-model', path_to_model, '-trainFile', train_file, '-tagSeparator', '/', '-search', 'owlqn2']).wait()
tagger = POSTagger()
print('Tagger Accuracy on Test Split:')
print(tagger.evaluate(test))
def train_maltparser(train_file='resources/train.conll', validation_file='resources/validation.conll', test_file='resources/test.conll', model_file='langModel.mco', path_to_jar='resources/malt.jar', options_file='resources/malt-options.xml', features_file='resources/malt-features.xml', memory_min='-Xms7g', memory_max='-Xmx8g'):
lemmatizer, tagger = Lemmatizer(), POSTagger()
train, validation, test = DadeganReader(train_file), DadeganReader(validation_file), DadeganReader(test_file)
train_sents = list(train.sents()) + list(validation.sents())
train_trees = list(train.trees()) + list(validation.trees())
train_data = train_file +'.data'
with codecs.open(train_data, 'w', 'utf8') as output:
for tree, sentence in zip(train_trees, tagger.tag_sents(train_sents)):
for i, (node, word) in enumerate(zip(tree.nodelist[1:], sentence), start=1):
node['tag'] = word[1]
node['lemma'] = lemmatizer.lemmatize(node['word'].replace('_', ' '), node['tag'])
print(i, node['word'].replace(' ', '_'), node['lemma'].replace(' ', '_'), node['tag'], node['tag'], '_', node['head'], node['rel'], '_', '_', sep='\t', file=output)
print(file=output)
subprocess.Popen(['java', memory_min, memory_max, '-jar', path_to_jar, '-w', 'resources', '-c', model_file, '-i', train_data, '-f', options_file, '-F', features_file, '-m', 'learn']).wait()
# evaluation
print('\nEvaluating trained model on test data:')
parser = DependencyParser(tagger=tagger, model_file=model_file)
tagged = tagger.tag_sents(test.sents())
parsed = parser.tagged_parse_sents(tagged)
test_data, test_results = test_file +'.data', test_file +'.results'
print('\n'.join([sentence.to_conll(10).replace('/', '') for sentence in test.trees()]).strip(), file=codecs.open(test_data, 'w', 'utf8'))
print('\n'.join([sentence.to_conll(10) for sentence in parsed]).strip(), file=codecs.open(test_results, 'w', 'utf8'))
subprocess.Popen(['java', '-jar', 'resources/MaltEval.jar', '-g', test_data, '-s', test_results]).wait()
| mit |
uthaipon/SkillsWorkshop2017 | Week03/PCA_aplied_to_ComputerHardware_data_set.py | 2 | 4589 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 13:28:49 2017
@author: Aster
"""
#=========================================================================
# Preparing the Dataset
#=========================================================================
import pandas as pd
df = pd.read_csv(
filepath_or_buffer='https://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data',
header=None,
sep=',')
df.columns=['vendor_name','Model_Name', 'MYCT', 'MMIN', 'MMAX', 'CACH','CHMIN','CHMAX','PRP','ERP']
df.dropna(how="all", inplace=True) # drops the empty line at file-end
df.tail()
# split data table into data X and class labels y
# pandas removed .ix; use positional indexing with .iloc instead
X = df.iloc[:, 3:12].values
y = df.iloc[:, 0].values
#--------------------------------------
# Standardizing
#--------------------------------------
from sklearn.preprocessing import StandardScaler
X_std = StandardScaler().fit_transform(X)
#=========================================================================
#Eigendecomposition - Computing Eigenvectors and Eigenvalues
#=========================================================================
#--------------------------------------
#Covariance Matrix
#--------------------------------------
import numpy as np
mean_vec = np.mean(X_std, axis=0)
cov_mat = (X_std - mean_vec).T.dot((X_std - mean_vec)) / (X_std.shape[0]-1)
# or
cov_mat = np.cov(X_std.T)
print('Covariance matrix: \n%s' %cov_mat)
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
print('Eigenvectors \n%s' %eig_vecs)
print('\nEigenvalues \n%s' %eig_vals)
#--------------------------------------
#Correlation Matrix
#--------------------------------------
# The eigendecomposition of the covariance matrix (if the input data was standardized) yields the same results as an eigendecomposition on the correlation matrix, since the correlation matrix can be understood as the normalized covariance matrix.
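# Quick illustrative check of the statement above (a sketch, not part of the
# original analysis): since StandardScaler divides by the population std, the
# correlation matrix equals the ddof=1 covariance matrix up to the factor
# n / (n - 1), so the eigenvectors coincide and the eigenvalues are only rescaled.
n_samples = X_std.shape[0]
print(np.allclose(np.corrcoef(X_std.T) * n_samples / (n_samples - 1), np.cov(X_std.T)))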
'''
Eigendecomposition of the standardized data based on the correlation matrix:
'''
cor_mat1 = np.corrcoef(X_std.T)
eig_vals, eig_vecs = np.linalg.eig(cor_mat1)
print('Eigenvectors \n%s' %eig_vecs)
print('\nEigenvalues \n%s' %eig_vals)
'''
Eigendecomposition of the raw data based on the correlation matrix:
'''
cor_mat2 = np.corrcoef(X.T)
eig_vals, eig_vecs = np.linalg.eig(cor_mat2)
print('Eigenvectors \n%s' %eig_vecs)
print('\nEigenvalues \n%s' %eig_vals)
#--------------------------------------
#Singular Value Decomposition
#--------------------------------------
# Most PCA implementations perform a Singular Value Decomposition (SVD) to improve the computational efficiency.
u,s,v = np.linalg.svd(X_std.T)
u
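# Illustrative link between the SVD and the eigendecomposition used above (a
# sketch): the squared singular values of the standardized data, divided by
# (n - 1), reproduce the eigenvalues of the covariance matrix, and the columns
# of u span the same principal directions.
print(np.allclose(np.sort(s ** 2 / (X_std.shape[0] - 1)), np.sort(np.linalg.eigvalsh(np.cov(X_std.T)))))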
#=========================================================================
# Selecting Principal Components
#=========================================================================
# the eigenvectors are the columns of eig_vecs, so check unit length column-wise
for ev in eig_vecs.T:
    np.testing.assert_array_almost_equal(1.0, np.linalg.norm(ev))
print('Everything ok!')
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs.sort()
eig_pairs.reverse()
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in descending order:')
for i in eig_pairs:
print(i[0])
print(eig_pairs)
#matrix_w = np.hstack((eig_pairs[0][1].reshape(7,1),
# eig_pairs[1][1].reshape(7,1),
# eig_pairs[2][1].reshape(7,1),
# eig_pairs[3][1].reshape(7,1)))
# np.hstack expects a sequence of arrays, not a generator, in recent NumPy
matrix_w = np.hstack([eig_pairs[i][1].reshape(7, 1) for i in [0, 1]])
print('Matrix W:\n', matrix_w)
#=========================================================================
#Projection Onto the New Feature Space
#=========================================================================
Y = X_std.dot(matrix_w)
#=========================================================================
# Clustering
#=========================================================================
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from matplotlib import style
kmeans = KMeans(n_clusters=2).fit(Y)
centroid = kmeans.cluster_centers_
labels = kmeans.labels_
colors = ["g.","r.","c.","y.","m.","k."]
plt.figure(figsize=(15,10))
for i in range(len(Y)):
plt.plot(Y[i,0],Y[i,1],"k.",markersize=10)
plt.show()
plt.figure(figsize=(15,10))
for i in range(len(Y)):
plt.plot(Y[i,0],Y[i,1],colors[labels[i]],markersize=10)
plt.scatter(centroid[:,0],centroid[:,1], marker = "x", s=150, linewidths = 5, zorder =10)
plt.show() | bsd-3-clause |
beiko-lab/gengis | bin/Lib/site-packages/matplotlib/tri/trifinder.py | 4 | 3221 | from __future__ import print_function
from matplotlib.tri import Triangulation
import matplotlib._tri as _tri
class TriFinder(object):
"""
Abstract base class for classes used to find the triangles of a
Triangulation in which (x,y) points lie.
Rather than instantiate an object of a class derived from TriFinder, it is
usually better to use the function
:func:`matplotlib.tri.Triangulation.get_trifinder`.
Derived classes implement __call__(x,y) where x,y are array_like point
coordinates of the same shape.
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError('Expected a Triangulation object')
self._triangulation = triangulation
class TrapezoidMapTriFinder(TriFinder):
"""
:class:`~matplotlib.tri.TriFinder` class implemented using the trapezoid
map algorithm from the book "Computational Geometry, Algorithms and
Applications", second edition, by M. de Berg, M. van Kreveld, M. Overmars
and O. Schwarzkopf.
The triangulation must be valid, i.e. it must not have duplicate points,
triangles formed from colinear points, or overlapping triangles. The
algorithm has some tolerance to triangles formed from colinear points, but
this should not be relied upon.
"""
def __init__(self, triangulation):
TriFinder.__init__(self, triangulation)
self._cpp_trifinder = _tri.TrapezoidMapTriFinder(
triangulation.get_cpp_triangulation())
self._initialize()
def __call__(self, x, y):
"""
Return an array containing the indices of the triangles in which the
specified x,y points lie, or -1 for points that do not lie within a
triangle.
*x*, *y* are array_like x and y coordinates of the same shape and any
number of dimensions.
        Returns an integer array with the same shape as *x* and *y*.
"""
# C++ checks arguments are OK.
return self._cpp_trifinder.find_many(x, y)
def _get_tree_stats(self):
"""
Return a python list containing the statistics about the node tree:
0: number of nodes (tree size)
1: number of unique nodes
2: number of trapezoids (tree leaf nodes)
3: number of unique trapezoids
4: maximum parent count (max number of times a node is repeated in
tree)
5: maximum depth of tree (one more than the maximum number of
comparisons needed to search through the tree)
6: mean of all trapezoid depths (one more than the average number
of comparisons needed to search through the tree)
"""
return self._cpp_trifinder.get_tree_stats()
def _initialize(self):
"""
Initialize the underlying C++ object. Can be called multiple times if,
for example, the triangulation is modified.
"""
self._cpp_trifinder.initialize()
def _print_tree(self):
"""
Print a text representation of the node tree, which is useful for
debugging purposes.
"""
self._cpp_trifinder.print_tree()
| gpl-3.0 |
FCH808/FCH808.github.io | Intro to Machine Learning/ud120-projects/feature_selection/find_signature.py | 2 | 1243 | #!/usr/bin/python
import pickle
import numpy
numpy.random.seed(42)
### the words (features) and authors (labels), already largely processed
words_file = "word_data_overfit.pkl" ### like the file you made in the last mini-project
authors_file = "email_authors_overfit.pkl" ### this too
word_data = pickle.load( open(words_file, "r"))
authors = pickle.load( open(authors_file, "r") )
### test_size is the percentage of events assigned to the test set (remainder go into training)
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn
features_train, features_test, labels_train, labels_test = train_test_split(word_data, authors, test_size=0.1, random_state=42)
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
features_train = vectorizer.fit_transform(features_train).toarray()
features_test = vectorizer.transform(features_test).toarray()
### a classic way to overfit is to use a small number
### of data points and a large number of features
### train on only 150 events to put ourselves in this regime
features_train = features_train[:150]
labels_train = labels_train[:150]
### your code goes here
| mit |
dingocuster/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
macks22/gensim | gensim/test/test_keras_integration.py | 1 | 6627 | import unittest
import os
import numpy as np
from gensim.models import word2vec
try:
from sklearn.datasets import fetch_20newsgroups
except ImportError:
raise unittest.SkipTest("Test requires sklearn to be installed, which is not available")
try:
import keras
from keras.engine import Input
from keras.models import Model
from keras.layers.merge import dot
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Flatten
from keras.layers import Conv1D, MaxPooling1D
except ImportError:
raise unittest.SkipTest("Test requires Keras to be installed, which is not available")
sentences = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']
]
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
class TestKerasWord2VecWrapper(unittest.TestCase):
def setUp(self):
self.model_cos_sim = word2vec.Word2Vec(sentences, size=100, min_count=1, hs=1)
# self.model_twenty_ng = word2vec.Word2Vec(word2vec.LineSentence(datapath('20_newsgroup_keras_w2v_data.txt')), min_count=1)
self.model_twenty_ng = word2vec.Word2Vec(min_count=1)
def testWord2VecTraining(self):
"""
Test word2vec training.
"""
model = self.model_cos_sim
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 100))
self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 100))
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
def testEmbeddingLayerCosineSim(self):
"""
Test Keras 'Embedding' layer returned by 'get_embedding_layer' function for a simple word similarity task.
"""
keras_w2v_model = self.model_cos_sim
keras_w2v_model_wv = keras_w2v_model.wv
embedding_layer = keras_w2v_model_wv.get_embedding_layer()
input_a = Input(shape=(1,), dtype='int32', name='input_a')
input_b = Input(shape=(1,), dtype='int32', name='input_b')
embedding_a = embedding_layer(input_a)
embedding_b = embedding_layer(input_b)
similarity = dot([embedding_a, embedding_b], axes=2, normalize=True)
model = Model(input=[input_a, input_b], output=similarity)
model.compile(optimizer='sgd', loss='mse')
word_a = 'graph'
word_b = 'trees'
output = model.predict([
np.asarray([keras_w2v_model.wv.vocab[word_a].index]),
np.asarray([keras_w2v_model.wv.vocab[word_b].index])
])
        # output is the cosine similarity between the two words (dot product of the L2-normalised embeddings)
self.assertTrue(type(output[0][0][0]) == np.float32) # verify that a float is returned
def testEmbeddingLayer20NewsGroup(self):
"""
Test Keras 'Embedding' layer returned by 'get_embedding_layer' function for a smaller version of the 20NewsGroup classification problem.
"""
MAX_SEQUENCE_LENGTH = 1000
# Prepare text samples and their labels
# Processing text dataset
texts = [] # list of text samples
texts_w2v = [] # used to train the word embeddings
labels = [] # list of label ids
data = fetch_20newsgroups(subset='train', categories=['alt.atheism', 'comp.graphics', 'sci.space'])
for index in range(len(data)):
label_id = data.target[index]
file_data = data.data[index]
i = file_data.find('\n\n') # skip header
if i > 0:
file_data = file_data[i:]
try:
curr_str = str(file_data)
sentence_list = curr_str.split('\n')
for sentence in sentence_list:
sentence = (sentence.strip()).lower()
texts.append(sentence)
texts_w2v.append(sentence.split(' '))
labels.append(label_id)
except Exception:
pass
# Vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
# word_index = tokenizer.word_index
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
x_train = data
y_train = labels
# prepare the embedding layer using the wrapper
keras_w2v = self.model_twenty_ng
keras_w2v.build_vocab(texts_w2v)
keras_w2v.train(texts, total_examples=keras_w2v.corpus_count, epochs=keras_w2v.iter)
keras_w2v_wv = keras_w2v.wv
embedding_layer = keras_w2v_wv.get_embedding_layer()
# create a 1D convnet to solve our classification task
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x) # global max pooling
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
preds = Dense(y_train.shape[1], activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
fit_ret_val = model.fit(x_train, y_train, epochs=1)
# verify the type of the object returned after training
self.assertTrue(type(fit_ret_val) == keras.callbacks.History) # value returned is a `History` instance. Its `history` attribute contains all information collected during training.
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
Transkribus/TranskribusDU | TranskribusDU/tasks/DU_Table/rowDetection.py | 1 | 90019 | # -*- coding: utf-8 -*-
"""
Build Rows for a BIESO model
H. Déjean
copyright Xerox 2017, Naver 2017, 2018
READ project
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os.path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
import collections
from lxml import etree
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics import homogeneity_score
from sklearn.metrics import completeness_score
import common.Component as Component
from common.trace import traceln
import config.ds_xml_def as ds_xml
from ObjectModel.xmlDSDocumentClass import XMLDSDocument
from ObjectModel.XMLDSTEXTClass import XMLDSTEXTClass
from ObjectModel.XMLDSTABLEClass import XMLDSTABLEClass
from ObjectModel.XMLDSCELLClass import XMLDSTABLECELLClass
from ObjectModel.XMLDSTableRowClass import XMLDSTABLEROWClass
from ObjectModel.XMLDSTableColumnClass import XMLDSTABLECOLUMNClass
from spm.spmTableRow import tableRowMiner
from xml_formats.Page2DS import primaAnalysis
from util.partitionEvaluation import evalPartitions, jaccard, iuo
from util.geoTools import sPoints2tuplePoints
from shapely.geometry import Polygon
from shapely import affinity
from shapely.ops import cascaded_union
class RowDetection(Component.Component):
"""
row detection
@precondition: column detection done, BIES tagging done for text elements
    11/9/2018: last idea: suppose the cell segmentation is good enough: group cells which are unambiguous
    with the cell in the (non-empty) next column.
    12/11/2018: already done in mergeHorizontalCells!!
    12/11/2018: assume perfect cells: build simply: take the next lright as the same row,
    then look for elements belonging to several rows
"""
usage = ""
version = "v.1.1"
description = "description: rowDetection from BIO textlines"
#--- INIT -------------------------------------------------------------------------------------------------------------
def __init__(self):
"""
Always call first the Component constructor.
"""
Component.Component.__init__(self, "RowDetection", self.usage, self.version, self.description)
self.colname = None
self.docid= None
self.do2DS= False
self.THHighSupport = 0.20
self.bYCut = False
self.bCellOnly = False
# for --test
self.bCreateRef = False
self.bCreateRefCluster = False
self.BTAG= 'B'
self.STAG = 'S'
self.bNoTable = False
self.bEvalCluster=False
self.evalData = None
def setParams(self, dParams):
"""
Always call first the Component setParams
Here, we set our internal attribute according to a possibly specified value (otherwise it stays at its default value)
"""
Component.Component.setParams(self, dParams)
# if dParams.has_key("coldir"):
# self.colname = dParams["coldir"]
if "docid" in dParams:
self.docid = dParams["docid"]
if "dsconv" in dParams:
self.do2DS = dParams["dsconv"]
if "createref" in dParams:
self.bCreateRef = dParams["createref"]
if "bNoColumn" in dParams:
self.bNoTable = dParams["bNoColumn"]
if "createrefCluster" in dParams:
self.bCreateRefCluster = dParams["createrefCluster"]
if "evalCluster" in dParams:
self.bEvalCluster = dParams["evalCluster"]
if "thhighsupport" in dParams:
self.THHighSupport = dParams["thhighsupport"] * 0.01
if 'BTAG' in dParams: self.BTAG = dParams["BTAG"]
if 'STAG' in dParams: self.STAG = dParams["STAG"]
if 'YCut' in dParams: self.bYCut = dParams["YCut"]
if 'bCellOnly' in dParams: self.bCellOnly = dParams["bCellOnly"]
def createCells(self, table):
"""
create new cells using BIESO tags
        @input: tableObject with old cells
@return: tableObject with BIES cells
@precondition: requires columns
if DU_col = M : ignore
"""
# print ('nbcells:',len(table.getAllNamedObjects(XMLDSTABLECELLClass)))
table._lObjects = []
lSkipped =[]
for col in table.getColumns():
# print (col)
lNewCells=[]
# keep original positions
try:col.resizeMe(XMLDSTABLECELLClass)
except: pass
# in order to ignore existing cells from GT: collect all objects from cells
lObjects = [txt for cell in col.getCells() for txt in cell.getObjects() ]
lObjects.sort(key=lambda x:x.getY())
curChunk=[]
lChunks = []
for txt in lObjects:
# do no yse it for the moment
if txt.getAttribute("DU_col") == 'Mx':
lSkipped.append(txt)
elif txt.getAttribute("DU_row") == self.STAG:
if curChunk != []:
lChunks.append(curChunk)
curChunk=[]
lChunks.append([txt])
elif txt.getAttribute("DU_row") in ['I', 'E']:
curChunk.append(txt)
elif txt.getAttribute("DU_row") == self.BTAG:
if curChunk != []:
lChunks.append(curChunk)
curChunk=[txt]
elif txt.getAttribute("DU_row") == 'O':
## add Other as well??? no
curChunk.append(txt)
# pass
if curChunk != []:
lChunks.append(curChunk)
if lChunks != []:
# create new cells
# table.delCell(cell)
irow= txt.getParent().getIndex()[0]
for i,c in enumerate(lChunks):
#create a new cell per chunk and replace 'cell'
newCell = XMLDSTABLECELLClass()
newCell.setPage(txt.getParent().getPage())
newCell.setParent(table)
newCell.setName(ds_xml.sCELL)
# newCell.setIndex(irow+i,txt.getParent().getIndex()[1])
newCell.setIndex(i,txt.getParent().getIndex()[1])
newCell.setObjectsList(c)
# newCell.addAttribute('type','new')
newCell.resizeMe(XMLDSTEXTClass)
newCell.tagMe2()
for o in newCell.getObjects():
o.setParent(newCell)
o.tagMe()
# contour = self.createContourFromListOfElements(newCell.getObjects())
# if contour is not None:
# # newCell.addAttribute('points',','.join("%s,%s"%(x[0],x[1]) for x in contour.lXY))
# newCell.addAttribute('points',','.join("%s,%s"%(x[0],x[1]) for x in contour))
# newCell.tagMe2()
# table.addCell(newCell)
lNewCells.append(newCell)
# if txt.getParent().getNode().getparent() is not None: txt.getParent().getNode().getparent().remove(txt.getParent().getNode())
# del(txt.getParent())
#delete all cells
for cell in col.getCells():
# print (cell)
try:
if cell.getNode().getparent() is not None: cell.getNode().getparent().remove(cell.getNode())
except: pass
[table.delCell(cell) for cell in col.getCells() ]
# print ('\t nbcells 2:',len(table.getAllNamedObjects(XMLDSTABLECELLClass)))
col._lcells= []
col._lObjects=[]
# print (col.getAllNamedObjects(XMLDSTABLECELLClass))
[table.addCell(c) for c in lNewCells]
[col.addCell(c) for c in lNewCells]
# print ('\t nbcells 3:',len(table.getAllNamedObjects(XMLDSTABLECELLClass)))
# print ('\tnbcells:',len(table.getAllNamedObjects(XMLDSTABLECELLClass)))
def matchCells(self,table):
"""
use lcs (dtw?) for matching
dtw: detect merging situation
for each col: match with next col
series 1 : col1 set of cells
series 2 : col2 set of cells
distance = Yoverlap
"""
dBest = {}
        # min(self.y2, tb.y2) - max(self.y1, tb.y1)
def distY(c1,c2):
o = min(c1.getY2() , c2.getY2()) - max(c1.getY() , c2.getY())
if o < 0:
return 1
# d = (2* (min(c1.getY2() , c2.getY2()) - max(c1.getY() , c2.getY()))) / (c1.getHeight() + c2.getHeight())
# print(c1,c1.getY(),c1.getY2(), c2,c2.getY(),c2.getY2(),o,d)
return 1 - (1 * (min(c1.getY2() , c2.getY2()) - max(c1.getY() , c2.getY()))) / min(c1.getHeight() , c2.getHeight())
laErr=[]
for icol, col in enumerate(table.getColumns()):
lc = col.getCells() + laErr
lc.sort(key=lambda x:x.getY())
if icol+1 < table.getNbColumns():
col2 = table.getColumns()[icol+1]
if col2.getCells() != []:
cntOk,cntErr,cntMissed, lFound,lErr,lMissed = evalPartitions(lc,col2.getCells(), .25,distY)
[laErr.append(x) for x in lErr if x not in laErr]
[laErr.remove(x) for x,y in lFound if x in laErr]
# lErr: cell not matched in col1
# lMissed: cell not matched in col2
print (col,col2,cntOk,cntErr,cntMissed,lErr) #, lFound,lErr,lMissed)
for x,y in lFound:
dBest[x]=y
else:
[laErr.append(x) for x in lc if x not in laErr]
# create row
#sort keys by x
skeys = sorted(dBest.keys(),key=lambda x:x.getX())
lcovered=[]
llR=[]
for key in skeys:
# print (key,lcovered)
if key not in lcovered:
lcovered.append(key)
nextC = dBest[key]
# print ("\t",key,nextC,lcovered)
lrow = [key]
while nextC:
lrow.append(nextC)
lcovered.append(nextC)
try:
nextC=dBest[nextC]
except KeyError:
print ('\txx\t',lrow)
llR.append(lrow)
nextC=None
for lrow in llR:
contour = self.createContourFromListOfElements(lrow)
if contour is not None:
spoints = ','.join("%s,%s"%(x[0],x[1]) for x in contour)
r = XMLDSTABLEROWClass(1)
r.setParent(table)
r.addAttribute('points',spoints)
r.tagMe('VV')
def assessCuts(self,table,lYCuts):
"""
input: table, ycuts
output:
"""
# features or values ?
try:lYCuts = map(lambda x:x.getValue(),lYCuts)
except:pass
lCells = table.getCells()
prevCut = table.getY()
irowIndex = 0
lRows= []
dCellOverlap = {}
for _,cut in enumerate(lYCuts):
row=[]
if cut - prevCut > 0:
[b1,b2] = prevCut, cut
for c in lCells:
[a1, a2] = c.getY(),c.getY() + c.getHeight()
if min(a2, b2) >= max(a1, b1):
row.append(c)
lRows.append(row)
irowIndex += 1
prevCut = cut
## BIO coherence
def buildLineCandidates(self,table):
"""
        return a list of lines corresponding to top row line candidates
"""
def mineTableRowPattern(self,table):
"""
find rows and columns patterns in terms of typographical position // mandatory cells,...
input: a set of rows (table)
action: seq mining of rows
output: pattern
Mining at table/page level
# of cells per row
# of cells per colmun
# cell with content (j: freq ; i: freq)
Sequential pattern:(itemset: setofrows; item cells?)
"""
# which col is mandatory
# text alignment in cells (per col)
for row in table.getRows():
# self.mineTypography()
a = row.computeSkewing()
"""
skewing detection: use synthetic data !!
simply scan row by row with previous row and adjust with coherence
"""
def getSkewingRepresentation(self,lcuts):
"""
input: list of featureObject
output: skewed cut (a,b)
        algo: for each feature: get the text node baselines and create a skewed line (a,b)
"""
def miningSeparatorShape(self,table,lCuts):
# import numpy as np
from shapely.geometry import MultiLineString
for cut in lCuts:
xordered= list(cut.getNodes())
print(cut,[x.getX() for x in xordered])
xordered.sort(key = lambda x:x.getX())
lSeparators = [ (x.getX(),x.getY()) for x in [xordered[0],xordered[-1]]]
print( lSeparators)
ml = MultiLineString(lSeparators)
print (ml.wkt)
# X = [x[0] for x in lSeparators]
# Y = [x[1] for x in lSeparators]
# print(X,Y)
# a, b = np.polynomial.polynomial.polyfit(X, Y, 1)
# xmin, xmax = table.getX(), table.getX2()
# y1 = a + b * xmin
# y2 = a + b * xmax
# print (y1,y2)
# print ([ (x.getObjects()[0].getBaseline().getY(),x.getObjects()[0].getBaseline().getAngle(),x.getY()) for x in xordered])
def processRows(self, table, predefinedCuts=[]):
"""
Apply mining to get Y cuts for rows
If everything is centered?
Try thnum= [5,10,20,30,40,50] and keep better coherence!
Then adjust skewing ? using features values: for c in lYcuts: print (c, [x.getY() for x in c.getNodes()])
replace columnMining by cell matching from col to col!!
simply best match (max overlap) between two cells NONONO
perform chk of cells (tagging is now very good!) and use it for column mining (chk + remaining cells)
"""
# self.matchCells(table)
# return
fMaxCoherence = 0.0
rowMiner= tableRowMiner()
# % of columns needed
lTHSUP= [0.2,0.3,0.4]
# lTHSUP= [0.2]
bestTHSUP =None
bestthnum= None
bestYcuts = None
for thnum in [10,20,30]: # must be correlated with leading/text height?
# for thnum in [30]: # must be correlated with leading/text height?
# for thnum in [50]: # must be correlated with leading/text height?
"""
07/1/2018: to be replace by HChunks
for each hchunks: % of cuts(beginning) = validate the top line as segmentor
## hchunk at cell level : if yes select hchunks at textline level as well?
"""
lLYcuts = rowMiner.columnMining(table,thnum,lTHSUP,predefinedCuts)
# print (lLYcuts)
# get skewing represenation
# [ x.setValue(x.getValue()-0) for x in lYcuts ]
for iy,lYcuts in enumerate(lLYcuts):
# print ("%s %s " %(thnum, lTHSUP[iy]))
# lYcuts.sort(key= lambda x:x.getValue())
# self.miningSeparatorShape(table,lYcuts)
# self.assessCuts(table, lYcuts)
# self.createRowsWithCuts2(table,lYcuts)
table.createRowsWithCuts(lYcuts)
table.reintegrateCellsInColRow()
coherence = self.computeCoherenceScore(table)
if coherence > fMaxCoherence:
fMaxCoherence = coherence
bestYcuts= lYcuts[:]
bestTHSUP = lTHSUP[iy]
bestthnum= thnum
# else: break
# print ('coherence Score for (%s,%s): %f\t%s'%(thnum,lTHSUP[iy],coherence,bestYcuts))
if bestYcuts is not None:
### create the separation with the hullcontour : row as polygon!!
## if no intersection with previous row : OK
## if intersection
# print (bestYcuts)
# for y in bestYcuts:
# ## get top elements of the cells to build the boundary ??
# print ('%s %s'%(y.getValue(),[(c.getX(),c.getY()) for c in sorted(y.getNodes(),key=lambda x:x.getX())]))
            ## what about elements outside the cut (before)
## try "skew option and evaluate""!!
## take max -H
## take skew
table.createRowsWithCuts(bestYcuts)
table.reintegrateCellsInColRow()
for row in table.getRows():
row.addAttribute('points',"0,0")
contour = self.createContourFromListOfElements([x for c in row.getCells() for x in c.getObjects()])
if contour is not None:
spoints = ','.join("%s,%s"%(x[0],x[1]) for x in contour)
row.addAttribute('points',spoints)
# print (len(table.getPage().getAllNamedObjects(XMLDSTABLECELLClass)))
table.buildNDARRAY()
# self.mineTableRowPattern(table)
# def defineRowTopBoundary(self,row,ycut):
# """
# define a top row boundary
# """
def findBoundaryLinesFromChunks(self,table,lhckh):
"""
create lines from chunks (create with cells)
take each chunk and create (a,b) with top contour
"""
from util.Polygon import Polygon as dspp
import numpy as np
dTop_lSgmt = collections.defaultdict(list)
for chk in lhckh:
sPoints = chk.getAttribute('points') #.replace(',',' ')
spoints = ' '.join("%s,%s"%((x,y)) for x,y in zip(*[iter(sPoints.split(','))]*2))
it_sXsY = (sPair.split(',') for sPair in spoints.split(' '))
plgn = dspp((float(sx), float(sy)) for sx, sy in it_sXsY)
try:
lT, lR, lB, lL = plgn.partitionSegmentTopRightBottomLeft()
dTop_lSgmt[chk].extend(lT)
except ValueError: pass
#now make linear regression to draw relevant separators
def getX(lSegment):
lX = list()
for x1,y1,x2,y2 in lSegment:
lX.append(x1)
lX.append(x2)
return lX
def getY(lSegment):
lY = list()
for x1,y1,x2,y2 in lSegment:
lY.append(y1)
lY.append(y2)
return lY
dAB = collections.defaultdict(list)
icmpt=0
for icol, lSegment in dTop_lSgmt.items(): #sorted(dTop_lSgmt.items()):
print (icol,lSegment)
X = getX(lSegment)
Y = getY(lSegment)
#sum(l,())
lfNorm = [np.linalg.norm([[x1,y1], [x2,y2]]) for x1,y1,x2,y2 in lSegment]
#duplicate each element
W = [fN for fN in lfNorm for _ in (0,1)]
# a * x + b
a, b = np.polynomial.polynomial.polyfit(X, Y, 1, w=W)
xmin, xmax = min(X), max(X)
y1 = a + b * (0)
y2 = a + b * table.getX2()
dAB[b].append((a,b))
rowline = XMLDSTABLEROWClass(icmpt)
rowline.setPage(table.getPage())
rowline.setParent(table)
icmpt+=1
# table.addColumn(rowline) # prevx1, prevymin,x1, ymin, x2, ymax, prevx2, prevymax))
rowline.addAttribute('points',"%s,%s %s,%s"%(0, y1, table.getX2(),y2))
# rowline.setX(prevxmin)
# rowline.setY(prevy1)
# rowline.setHeight(y2 - prevy1)
# rowline.setWidth(xmax- xmin)
rowline.tagMe('SeparatorRegion')
# print (a,b)
# for b in sorted(dAB.keys()):
# print (b,dAB[b])
def processRows3(self,table,predefinedCuts=[] ):
"""
build rows:
        for a given cell: if one single Y-overlapping cell exists in the next column, integrate it into the row
"""
from tasks.TwoDChunking import TwoDChunking
hchk = TwoDChunking()
lElts=[]
[lElts.append(x) for col in table.getColumns() for x in col.getCells()]
lhchk = hchk.HorizonalChunk(table.getPage(),lElts=lElts,bStrict=False)
# lRows = []
# curRow = []
# for col in table.getColumns():
# lcells = col.getCells()
def processRows2(self,table,predefinedCuts=[]):
"""
Apply mining to get Y cuts for rows
"""
from tasks.TwoDChunking import TwoDChunking
hchk = TwoDChunking()
lhchk = hchk.HorizonalChunk(table.getPage(),lElts=table.getCells())
# create bounday lines from lhckh
# lYcuts = self.findBoundaryLinesFromChunks(table,lhchk)
# lYcuts.sort(key= lambda x:x.getValue())
# self.getSkewingRepresentation(lYcuts)
# self.assessCuts(table, lYcuts)
# self.createRowsWithCuts2(table,lYcuts)
# table.createRowsWithCuts(lYcuts)
# table.reintegrateCellsInColRow()
#
# table.buildNDARRAY()
def checkInputFormat(self,lPages):
"""
delete regions : copy regions elements at page object
unlink subnodes
"""
for page in lPages:
lTables = page.getAllNamedObjects(XMLDSTABLEClass)
for table in lTables:
lRegions = table.getAllNamedObjects("CELL")
lElts=[]
[lElts.extend(x.getObjects()) for x in lRegions]
[table.addObject(x,bDom=True) for x in lElts]
[table.removeObject(x,bDom=True) for x in lRegions]
def processYCuts(self,ODoc):
from util.XYcut import mergeSegments
self.checkInputFormat(ODoc.getPages())
for page in ODoc.getPages():
traceln("page: %d" % page.getNumber())
lTables = page.getAllNamedObjects(XMLDSTABLEClass)
for table in lTables:
print ('nb Y: %s'% len(set([round(x.getY()) for x in page.getAllNamedObjects(XMLDSTEXTClass)])),len(page.getAllNamedObjects(XMLDSTEXTClass)))
# lCuts, _, _ = mergeSegments([(x.getY(),x.getY() + x.getHeight(),x) for x in page.getAllNamedObjects(XMLDSTEXTClass)],0)
# for i, (y,_,cut) in enumerate(lCuts):
# ll =list(cut)
# ll.sort(key=lambda x:x.getY())
# #add column
# myRow= XMLDSTABLEROWClass(i)
# myRow.setPage(page)
# myRow.setParent(table)
# table.addObject(myRow)
# myRow.setY(y)
# myRow.setX(table.getX())
# myRow.setWidth(table.getWidth())
# if i +1 < len(lCuts):
# myRow.setHeight(lCuts[i+1][0]-y)
# else: # use table
# myRow.setHeight(table.getY2()-y)
# table.addRow(myRow)
# print (myRow)
# myRow.tagMe(ds_xml.sROW)
def mergeHorizontalCells(self,table):
"""
        merge cell a to b in the next col iff b overlaps horizontally with a (using the right border from points)
        input: a table, with candidate cells
        output: clusters of cells as row candidates
        simply ignore cells which overlap several cells in the next column
        then: extend row candidates if needed
        if no column is known: simply take the first cell in lright if the cells in lright do not X-overlap (the first nearest w/o issue)
"""
        # first create an index for horizontal neighbours
lNBNeighboursNextCol=collections.defaultdict(list)
lNBNeighboursPrevCol=collections.defaultdict(list)
for cell in table.getCells():
# get next col
icol = cell.getIndex()[1]
if icol < table.getNbColumns()-1:
nextColCells=table.getColumns()[icol+1].getCells()
sorted(nextColCells,key=lambda x:x.getY())
lHOverlap= []
[lHOverlap.append(c) for c in nextColCells if cell.signedRatioOverlapY(c)> 1]
# if no overlap: take icol + 2
lNBNeighboursNextCol[cell].extend(lHOverlap)
if icol > 1:
prevColCells=table.getColumns()[icol-1].getCells()
sorted(prevColCells,key=lambda x:x.getY())
lHOverlap= []
[lHOverlap.append(c) for c in prevColCells if cell.signedRatioOverlapY(c)> 1]
# if not overlap take icol-2
lNBNeighboursPrevCol[cell].extend(lHOverlap)
lcovered=[]
for icol,col in enumerate(table.getColumns()):
sortedC = sorted(col.getCells(),key=lambda x:x.getY())
for cell in sortedC:
if len(lNBNeighboursNextCol[cell]) < 2 and len(lNBNeighboursPrevCol[cell]) < 2:
if cell not in lcovered:
print(type(cell.getContent()))
print ('START :', icol,cell, cell.getContent(),cell.getY(),cell.getY2())
lcovered.append(cell)
lcurRow = [cell]
iicol=icol
curCell = cell
while iicol < table.getNbColumns()-1:
nextColCells=table.getColumns()[iicol+1].getCells()
sorted(nextColCells,key=lambda x:x.getY())
for c in nextColCells:
if len(lNBNeighboursNextCol[c]) < 2 and len(lNBNeighboursPrevCol[c]) < 2:
if curCell.signedRatioOverlapY(c) > 0.25 * curCell.getHeight():
lcovered.append(c)
lcurRow.append(c)
print (curCell, curCell.getY(),curCell.getHeight(),c, curCell.signedRatioOverlapY(c),c.getY(), c.getHeight(),list(map(lambda x:x.getContent(),lcurRow)))
curCell = c
iicol +=1
print ("FINAL", list(map(lambda x:(x,x.getContent()),lcurRow)) )
print ("\t", list(map(lambda x:x.getIndex(),lcurRow)) )
if len(lcurRow)>1:
# create a contour for visualization
# order by col: get top and bottom polylines for them
contour = self.createContourFromListOfElements(lcurRow)
spoints = ','.join("%s,%s"%(x[0],x[1]) for x in contour)
r = XMLDSTABLEROWClass(1)
r.setParent(table)
r.addAttribute('points',spoints)
r.tagMe('HH')
# def mergeHorizontalTextLines(self,table):
# """
# merge text lines which are aligned
# input: a table, with candidate textlines
# output: cluster of textlines as row candidates
#
# """
# from shapely.geometry import Polygon as pp
# from rtree import index
#
# cellidx = index.Index()
# lTexts = []
# lPText=[]
# lReverseIndex = {}
# # Populate R-tree index with bounds of grid cells
# it=0
# for cell in table.getCells():
# for text in cell.getObjects():
# tt = pp( [(text.getX(),text.getY()),(text.getX2(),text.getY()),(text.getX2(),text.getY2()), ((text.getX(),text.getY2()))] )
# lTexts.append(text)
# lPText.append(tt)
# cellidx.insert(it, tt.bounds)
# it += 1
# lReverseIndex[tt.bounds] = text
#
# lcovered=[]
# lfulleval= []
# for text in lTexts:
# if text not in lcovered:
# # print ('START :', text, text.getContent())
# lcovered.append(text)
# lcurRow = [text]
# curText= text
# while curText is not None:
# # print (curText, lcurRow)
# # sPoints = text.getAttribute('points')
# sPoints = curText.getAttribute('blpoints')
# # print (sPoints)
# # modify for creating aline to the right
# # take the most right X
# lastx,lasty = list([(float(x),float(y)) for x,y in zip(*[iter(sPoints.split(','))]*2)])[-1]
# # polytext = pp([(float(x),float(y)) for x,y in zip(*[iter(sPoints.split(','))]*2)])
# polytext = pp([(lastx,lasty-10),(lastx+1000,lasty-10),(lastx+1000,lasty),(lastx,lasty)])
# # print([(lastx,lasty-10),(lastx+1000,lasty-10),(lastx+1000,lasty),(lastx,lasty)])
# ltover = [lPText[pos] for pos in cellidx.intersection(polytext.bounds)]
# ltover.sort(key=lambda x:x.centroid.coords[0])
# lnextStep=[]
# # print ('\tnext\t',list(map(lambda x:lReverseIndex[x.bounds].getContent(),ltover)))
#
# for t1 in ltover:
# # here conditions: vertical porjection and Y overlap ; not area!
# if polytext.intersection(t1).area > 0.1: #t1.area*0.5:
# if t1 not in lnextStep and lReverseIndex[t1.bounds] not in lcovered:
# lnextStep.append(t1)
# if lnextStep != []:
# lnextStep.sort(key=lambda x:x.centroid.coords[0])
# # print ('\t',list(map(lambda x:(lReverseIndex[x.bounds].getX(),lReverseIndex[x.bounds].getContent()),lnextStep)))
# nextt = lnextStep[0]
# lcurRow.append(lReverseIndex[nextt.bounds])
# lcovered.append(lReverseIndex[nextt.bounds])
# curText = lReverseIndex[nextt.bounds]
# else:curText = None
#
# # print ("FINAL", list(map(lambda x:(x,x.getContent()),lcurRow)) )
# # print ("FINAL", list(map(lambda x:(x,x.getParent()),lcurRow)) )
# lfulleval.append(self.comptureClusterHomogeneity(lcurRow,0))
#
# if len(lcurRow)>1:
# # create a contour for visualization
# # order by col: get top and bottom polylines for them
# contour = self.createContourFromListOfElements(lcurRow)
# spoints = ','.join("%s,%s"%(x[0],x[1]) for x in contour)
# r = XMLDSTABLEROWClass(1)
# r.setParent(table)
# r.addAttribute('points',spoints)
# r.tagMe('VV')
# r.tagMe()
#
# print (sum(lfulleval)/len(lfulleval))
def mergeHorVerTextLines(self,table):
"""
build HV lines
"""
from util import TwoDNeighbourhood as TwoDRel
lTexts = []
if self.bNoTable:
lTexts = table.getAllNamedObjects(XMLDSTEXTClass)
else:
for cell in table.getCells():
# bug to be fixed!!
if cell.getRowSpan() == 1 and cell.getColSpan() == 1:
lTexts.extend(set(cell.getObjects()))
for e in lTexts:
e.lright=[]
e.lleft=[]
e.ltop=[]
e.lbottom=[]
lVEdge = TwoDRel.findVerticalNeighborEdges(lTexts)
for a,b in lVEdge:
a.lbottom.append( b )
b.ltop.append(a)
for elt in lTexts:
# dirty!
elt.setHeight(max(5,elt.getHeight()-3))
elt.setWidth(max(5,elt.getWidth()-3))
TwoDRel.rotateMinus90degOLD(elt)
lHEdge = TwoDRel.findVerticalNeighborEdges(lTexts)
for elt in lTexts:
# elt.tagMe()
TwoDRel.rotatePlus90degOLD(elt)
# return
for a,b in lHEdge:
a.lright.append( b )
b.lleft.append(a)
# ss
for elt in lTexts:
elt.lleft.sort(key = lambda x:x.getX(),reverse=True)
# elt.lright.sort(key = lambda x:x.getX())
if len(elt.lright) > 1:
elt.lright = []
elt.lright.sort(key = lambda x:elt.signedRatioOverlapY(x),reverse=True)
# print (elt, elt.getY(), elt.lright)
elt.ltop.sort(key = lambda x:x.getY())
if len(elt.lbottom) >1:
elt.lbottom = []
elt.lbottom.sort(key = lambda x:elt.signedRatioOverlapX(x),reverse=True)
# Horizontal
lTexts.sort(key = lambda x:x.getX())
lcovered=[]
lfulleval = []
for text in lTexts:
if text not in lcovered:
# print ('START :', text, text.getContent())
lcovered.append(text)
lcurRow = [text]
curText= text
while curText is not None:
try:
nextT = curText.lright[0]
# print ('\t',[(x,curText.signedRatioOverlapY(x)) for x in curText.lright])
if nextT not in lcovered:
lcurRow.append(nextT)
lcovered.append(nextT)
curText = nextT
except IndexError:curText = None
# print ("FINAL", list(map(lambda x:(x,x.getContent()),lcurRow)) )
# lfulleval.append(self.comptureClusterHomogeneity(lcurRow,0))
if len(lcurRow) > 1:
# create a contour for visualization
# order by col: get top and bottom polylines for them
contour = self.createContourFromListOfElements(lcurRow)
if contour is not None:
spoints = ','.join("%s,%s"%(x[0],x[1]) for x in contour)
r = XMLDSTABLEROWClass(1)
r.setParent(table)
r.addAttribute('points',spoints)
r.tagMe('HH')
# print (sum(lfulleval)/len(lfulleval))
# Vertical
lTexts.sort(key = lambda x:x.getY())
lcovered=[]
lfulleval = []
for text in lTexts:
if text not in lcovered:
# print ('START :', text, text.getContent())
lcovered.append(text)
lcurCol = [text]
curText= text
while curText is not None:
try:
nextT = curText.lbottom[0]
# print ('\t',[(x,curText.signedRatioOverlapY(x)) for x in curText.lright])
if nextT not in lcovered and len(nextT.lbottom) == 1:
lcurCol.append(nextT)
lcovered.append(nextT)
curText = nextT
except IndexError:curText = None
# print ("FINAL", list(map(lambda x:(x,x.getContent()),lcurCol)) )
# lfulleval.append(self.comptureClusterHomogeneity(lcurCol,1))
if len(lcurCol)>1:
# create a contour for visualization
# order by col: get top and bottom polylines for them
contour = self.createContourFromListOfElements(lcurCol)
if contour is not None:
spoints = ','.join("%s,%s"%(x[0],x[1]) for x in contour)
r = XMLDSTABLEROWClass(1)
r.setParent(table)
r.addAttribute('points',spoints)
# r.setDimensions(...)
r.tagMe('VV')
# print (sum(lfulleval)/len(lfulleval))
def mergeHorVerCells(self,table):
"""
build HV chunks cells
"""
from util import TwoDNeighbourhood as TwoDRel
lTexts = []
for cell in table.getCells():
# bug to be fixed!!
if cell.getRowSpan() == 1 and cell.getColSpan() == 1:
# lTexts.extend(set(cell.getObjects()))
lTexts.append(cell)
for e in lTexts:
e.lright=[]
e.lleft=[]
e.ltop=[]
e.lbottom=[]
lVEdge = TwoDRel.findVerticalNeighborEdges(lTexts)
for a,b in lVEdge:
a.lbottom.append( b )
b.ltop.append(a)
for elt in lTexts:
# dirty!
elt.setHeight(max(5,elt.getHeight()-3))
elt.setWidth(max(5,elt.getWidth()-3))
TwoDRel.rotateMinus90degOLD(elt)
lHEdge = TwoDRel.findVerticalNeighborEdges(lTexts)
for elt in lTexts:
# elt.tagMe()
TwoDRel.rotatePlus90degOLD(elt)
# return
for a,b in lHEdge:
a.lright.append( b )
b.lleft.append(a)
# ss
for elt in lTexts:
elt.lleft.sort(key = lambda x:x.getX(),reverse=True)
# elt.lright.sort(key = lambda x:x.getX())
elt.lright.sort(key = lambda x:elt.signedRatioOverlapY(x),reverse=True)
if len(elt.lright) >1:
elt.lright = []
# print (elt, elt.getY(), elt.lright)
elt.ltop.sort(key = lambda x:x.getY())
elt.lbottom.sort(key = lambda x:elt.signedRatioOverlapX(x),reverse=True)
# Horizontal
lTexts.sort(key = lambda x:x.getX())
lcovered=[]
lfulleval = []
for text in lTexts:
if text not in lcovered:
# print ('START :', text, text.getContent())
lcovered.append(text)
lcurRow = [text]
curText= text
while curText is not None:
try:
nextT = curText.lright[0]
# print ('\t',[(x,curText.signedRatioOverlapY(x)) for x in curText.lright])
if nextT not in lcovered:
lcurRow.append(nextT)
lcovered.append(nextT)
curText = nextT
except IndexError:curText = None
print ("FINAL", list(map(lambda x:(x,x.getContent()),lcurRow)) )
# lfulleval.append(self.comptureClusterHomogeneity(lcurRow,0))
if len(lcurRow) > 1:
# create a contour for visualization
# order by col: get top and bottom polylines for them
contour = self.createContourFromListOfElements(lcurRow)
if contour is not None:
spoints = ','.join("%s,%s"%(x[0],x[1]) for x in contour)
r = XMLDSTABLEROWClass(1)
r.setParent(table)
r.addAttribute('points',spoints)
r.tagMe('HH')
# print (sum(lfulleval)/len(lfulleval))
# # Vertical
# lTexts.sort(key = lambda x:x.getY())
# lcovered=[]
# lfulleval = []
# for text in lTexts:
# if text not in lcovered:
# # print ('START :', text, text.getContent())
# lcovered.append(text)
# lcurCol = [text]
# curText= text
# while curText is not None:
# try:
# nextT = curText.lbottom[0]
# # print ('\t',[(x,curText.signedRatioOverlapY(x)) for x in curText.lright])
# if nextT not in lcovered:
# lcurCol.append(nextT)
# lcovered.append(nextT)
# curText = nextT
# except IndexError:curText = None
#
# # print ("FINAL", list(map(lambda x:(x,x.getContent()),lcurRow)) )
# lfulleval.append(self.comptureClusterHomogeneity(lcurCol,1))
# if len(lcurCol)>1:
# # create a contour for visualization
# # order by col: get top and bottom polylines for them
# contour = self.createContourFromListOfElements(lcurCol)
# if contour is not None:
# spoints = ','.join("%s,%s"%(x[0],x[1]) for x in contour)
# r = XMLDSTABLEROWClass(1)
# r.setParent(table)
# r.addAttribute('points',spoints)
# r.tagMe('VV')
# print (sum(lfulleval)/len(lfulleval))
def createContourFromListOfElements(self, lElts):
"""
create a polyline from a list of elements
input : list of elements
        output: list of (x, y) points of the convex hull of the merged elements, or None
"""
from shapely.geometry import Polygon as pp
from shapely.ops import cascaded_union
lP = []
for elt in lElts:
sPoints = elt.getAttribute('points')
if sPoints is None:
lP.append(pp([(elt.getX(),elt.getY()),(elt.getX(),elt.getY2()), (elt.getX2(),elt.getY2()),(elt.getX2(),elt.getY())] ))
else:
lP.append(pp([(float(x),float(y)) for x,y in zip(*[iter(sPoints.split(','))]*2)]))
try:ss = cascaded_union(lP)
except ValueError:
# print(lElts,lP)
return None
if not ss.is_empty:
return list(ss.convex_hull.exterior.coords)
else: return None
def comptureClusterHomogeneity(self,c,dir):
"""
        % of elements belonging to the same structure
        dir: 0: row, 1: column
"""
ldict = collections.defaultdict(list)
[ ldict[elt.getParent().getIndex()[dir]].append(elt) for elt in c]
lstat = ([(k,len(ldict[k])) for k in ldict])
total = sum([x[1] for x in lstat])
leval = (max(([len(ldict[x])/total for x in ldict])))
return leval
def findRowsInDoc(self,ODoc):
"""
find rows for each table in document
input: a document
output: a document where tables have rows
"""
from tasks.TwoDChunking import TwoDChunking
self.lPages = ODoc.getPages()
# hchk = TwoDChunking()
# not always?
# self.mergeLineAndCells(self.lPages)
for page in self.lPages:
traceln("page: %d" % page.getNumber())
# print (len(page.getAllNamedObjects(XMLDSTABLECELLClass)))
lTables = page.getAllNamedObjects(XMLDSTABLEClass)
for table in lTables:
# col as polygon
self.getPolylinesForRowsColumns(table)
# self.getPolylinesForRows(table)
# rowscuts = list(map(lambda r:r.getY(),table.getRows()))
rowscuts=[]
# traceln ('initial cuts:',rowscuts)
self.createCells(table)
# lhchk = hchk.HorizonalChunk(page,lElts=table.getCells())
# hchk.VerticalChunk(page,tag=XMLDSTEXTClass)
# self.mergeHorizontalCells(table)
# then merge overlaping then sort Y and index : then insert ambiguous textlines
# self.mergeHorizontalTextLines(table)
# self.mergeHorVerTextLines(table)
# self.processRows3(table)
if self.bCellOnly:
continue
# self.mergeHorizontalCells(table)
# self.mergeHorVerCells(table)
self.processRows(table,rowscuts)
# self.mineTableRowPattern(table)
table.tagMe()
if self.bNoTable:
self.mergeHorVerTextLines(page)
# def extendLines(self,table):
# """
# Extend textlines up to table width using baseline
# input:table
# output: table with extended baselines
# """
# for col in table.getColumns():
# for cell in col.getCells():
# for elt in cell.getObjects():
# if elt.getWidth()> 100:
# #print ([ (x.getObjects()[0].getBaseline().getY(),x.getObjects()[0].getBaseline().getAngle(),x.getY()) for x in xordered])
# print (elt,elt.getBaseline().getAngle(), elt.getBaseline().getBx(),elt.getBaseline().getPoints())
# newBl = [(table.getX(),elt.getBaseline().getAngle()* table.getX() + elt.getBaseline().getBx()),
# (table.getX2(),elt.getBaseline().getAngle()* table.getX2() + elt.getBaseline().getBx())
# ]
# elt.getBaseline().setPoints(newBl)
# myPoints = '%f,%f,%f,%f'%(newBl[0][0],newBl[0][1],newBl[1][0],newBl[1][1])
# elt.addAttribute('blpoints',myPoints)
# sys.exit(0)
def getPolylinesForRowsColumns(self,table):
"""
input: list of cells (=table)
output: columns defined by polylines (not Bounding box)
"""
import numpy as np
from util.Polygon import Polygon
from shapely.geometry import Polygon as pp
# from shapely.ops import cascaded_union
from rtree import index
cellidx = index.Index()
lCells = []
lReverseIndex = {}
# Populate R-tree index with bounds of grid cells
for pos, cell in enumerate(table.getCells()):
# assuming cell is a shapely object
cc = pp( [(cell.getX(),cell.getY()),(cell.getX2(),cell.getY()),(cell.getX2(),cell.getY2()), ((cell.getX(),cell.getY2()))] )
lCells.append(cc)
cellidx.insert(pos, cc.bounds)
lReverseIndex[cc.bounds] = cell
dColSep_lSgmt = collections.defaultdict(list)
dRowSep_lSgmt = collections.defaultdict(list)
for cell in table.getCells():
row, col, rowSpan, colSpan = [int(cell.getAttribute(sProp)) for sProp \
in ["row", "col", "rowSpan", "colSpan"] ]
sPoints = cell.getAttribute('points') #.replace(',',' ')
# print (cell,sPoints)
spoints = ' '.join("%s,%s"%((x,y)) for x,y in zip(*[iter(sPoints.split(','))]*2))
it_sXsY = (sPair.split(',') for sPair in spoints.split(' '))
plgn = Polygon((float(sx), float(sy)) for sx, sy in it_sXsY)
# print (plgn.getBoundingBox(),spoints)
try:
lT, lR, lB, lL = plgn.partitionSegmentTopRightBottomLeft()
#now the top segments contribute to row separator of index: row
dRowSep_lSgmt[row].extend(lT)
dRowSep_lSgmt[row+rowSpan].extend(lB)
dColSep_lSgmt[col].extend(lL)
dColSep_lSgmt[col+colSpan].extend(lR)
except ValueError: pass
#now make linear regression to draw relevant separators
def getX(lSegment):
lX = list()
for x1,y1,x2,y2 in lSegment:
lX.append(x1)
lX.append(x2)
return lX
def getY(lSegment):
lY = list()
for x1,y1,x2,y2 in lSegment:
lY.append(y1)
lY.append(y2)
return lY
prevx1 , prevx2 , prevymin , prevymax = None,None,None,None #table.getX(),table.getX(),table.getY(),table.getY2()
# erase columns:
table.eraseColumns()
icmpt=0
for icol, lSegment in sorted(dColSep_lSgmt.items()):
X = getX(lSegment)
Y = getY(lSegment)
#sum(l,())
lfNorm = [np.linalg.norm([[x1,y1], [x2,y2]]) for x1,y1,x2,y2 in lSegment]
#duplicate each element
W = [fN for fN in lfNorm for _ in (0,1)]
# a * x + b
a, b = np.polynomial.polynomial.polyfit(Y, X, 1, w=W)
ymin, ymax = min(Y), max(Y)
x1 = a + b * ymin
x2 = a + b * ymax
if prevx1:
col = XMLDSTABLECOLUMNClass()
col.setPage(table.getPage())
col.setParent(table)
col.setIndex(icmpt)
icmpt+=1
table.addColumn(col)
col.addAttribute('points',"%s,%s %s,%s %s,%s %s,%s" % (prevx1, prevymin, x1, ymin, x2, ymax, prevx2, prevymax))  # four x,y corners as space-separated pairs
col.setX(prevx1)
col.setY(prevymin)
col.setHeight(ymax- ymin)
col.setWidth(x2-prevx1)
col.tagMe()
# from shapely.geometry import Polygon as pp
polycol = pp([(prevx1, prevymin),(x1, ymin), (x2, ymax), (prevx2, prevymax)] )
# print ((prevx1, prevymin),(x1, ymin), (x2, ymax), (prevx2, prevymax))
# colCells = cascaded_union([cells[pos] for pos in cellidx.intersection(polycol.bounds)])
colCells = [lCells[pos] for pos in cellidx.intersection(polycol.bounds)]
for cell in colCells:
try:
if polycol.intersection(cell).area > cell.area*0.5:
col.addCell(lReverseIndex[cell.bounds])
except:
pass
prevx1 , prevx2 , prevymin , prevymax = x1, x2, ymin, ymax
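# A small standalone sketch of the separator-fitting step used above (not part of
# the original class): every cell border contributes a segment, each segment
# endpoint is a sample, and each endpoint is weighted (the code above passes
# np.linalg.norm over the endpoint pairs; the plain segment length used below
# seems to be the intent).  The weighted fit x = a + b*y is then evaluated at the
# vertical extent of the samples.  The segment values in the usage line are made up.
def _fit_vertical_separator(lSegment):
    import numpy as np
    Y = [v for x1, y1, x2, y2 in lSegment for v in (y1, y2)]
    X = [v for x1, y1, x2, y2 in lSegment for v in (x1, x2)]
    lengths = [np.linalg.norm([x2 - x1, y2 - y1]) for x1, y1, x2, y2 in lSegment]
    W = [fN for fN in lengths for _ in (0, 1)]                # one weight per endpoint
    a, b = np.polynomial.polynomial.polyfit(Y, X, 1, w=W)     # x = a + b*y
    ymin, ymax = min(Y), max(Y)
    return (a + b * ymin, ymin), (a + b * ymax, ymax)
# e.g. _fit_vertical_separator([(100, 0, 101, 50), (103, 50, 102, 120)]) returns the
# top and bottom (x, y) points of one nearly vertical column separator.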
def getPolylinesForRows(self,table):
"""
input: list of candidate cells (=table)
output: "rows" defined by top polylines
"""
import numpy as np
from util.Polygon import Polygon
from shapely.geometry import Polygon as pp
# from shapely.ops import cascaded_union
from rtree import index
cellidx = index.Index()
lCells = []
lReverseIndex = {}
# Populate R-tree index with bounds of grid cells
for pos, cell in enumerate(table.getCells()):
# assuming cell is a shapely object
cc = pp( [(cell.getX(),cell.getY()),(cell.getX2(),cell.getY()),(cell.getX2(),cell.getY2()), ((cell.getX(),cell.getY2()))] )
lCells.append(cc)
cellidx.insert(pos, cc.bounds)
lReverseIndex[cc.bounds] = cell
dColSep_lSgmt = collections.defaultdict(list)
dRowSep_lSgmt = collections.defaultdict(list)
for cell in table.getCells():
row, col, rowSpan, colSpan = [int(cell.getAttribute(sProp)) for sProp \
in ["row", "col", "rowSpan", "colSpan"] ]
sPoints = cell.getAttribute('points') #.replace(',',' ')
# print (cell,sPoints)
spoints = ' '.join("%s,%s"%((x,y)) for x,y in zip(*[iter(sPoints.split(','))]*2))
it_sXsY = (sPair.split(',') for sPair in spoints.split(' '))
plgn = Polygon((float(sx), float(sy)) for sx, sy in it_sXsY)
# print (plgn.getBoundingBox(),spoints)
try:
lT, lR, lB, lL = plgn.partitionSegmentTopRightBottomLeft()
#now the top segments contribute to row separator of index: row
dRowSep_lSgmt[row].extend(lT)
dRowSep_lSgmt[row+rowSpan].extend(lB)
dColSep_lSgmt[col].extend(lL)
dColSep_lSgmt[col+colSpan].extend(lR)
except ValueError: pass
#now make linear regression to draw relevant separators
def getX(lSegment):
lX = list()
for x1,y1,x2,y2 in lSegment:
lX.append(x1)
lX.append(x2)
return lX
def getY(lSegment):
lY = list()
for x1,y1,x2,y2 in lSegment:
lY.append(y1)
lY.append(y2)
return lY
prevxmin , prevxmax , prevy1 , prevy2 = None,None,None,None #table.getX(),table.getX(),table.getY(),table.getY2()
# erase columns:
table.eraseColumns()
icmpt=0
for _, lSegment in sorted(dRowSep_lSgmt.items()):
X = getX(lSegment)
Y = getY(lSegment)
#sum(l,())
lfNorm = [np.linalg.norm([[x1,y1], [x2,y2]]) for x1,y1,x2,y2 in lSegment]
#duplicate each element
W = [fN for fN in lfNorm for _ in (0,1)]
# a * x + b
a, b = np.polynomial.polynomial.polyfit(X, Y, 1, w=W)
xmin, xmax = min(X), max(X)
y1 = a + b * xmin
y2 = a + b * xmax
if prevy1:
row = XMLDSTABLEROWClass(icmpt)
row.setPage(table.getPage())
row.setParent(table)
icmpt+=1
table.addColumn(row)
row.addAttribute('points',"%s,%s %s,%s %s,%s %s,%s" % (prevxmin, prevy1, prevxmax, prevy2, xmax, y2, prevxmax, y1))
row.setX(prevxmin)
row.setY(prevy1)
row.setHeight(y2 - prevy1)
row.setWidth(xmax - xmin)
row.tagMe()
# from shapely.geometry import Polygon as pp
# polycol = pp([(prevx1, prevymin),(x1, ymin), (x2, ymax), (prevx2, prevymax)] )
# # print ((prevx1, prevymin),(x1, ymin), (x2, ymax), (prevx2, prevymax))
# # colCells = cascaded_union([cells[pos] for pos in cellidx.intersection(polycol.bounds)])
# colCells = [lCells[pos] for pos in cellidx.intersection(polycol.bounds)]
# for cell in colCells:
# if polycol.intersection(cell).area > cell.area*0.5:
# col.addCell(lReverseIndex[cell.bounds])
prevy1 , prevy2 , prevxmin , prevxmax = y1, y2, xmin, xmax
for cell in table.getCells():
del cell._lAttributes['points']
def testscale(self,ltexts):
return
for t in ltexts:
if True or t.getAttribute('id')[-4:] == '1721':
# print (t)
# print (etree.tostring(t.getNode()))
shrinked = affinity.scale(t.toPolygon(),3,-0.8)
# print (list(t.toPolygon().exterior.coords), list(shrinked.exterior.coords))
ss = ",".join(["%s,%s"%(x,y) for x,y in shrinked.exterior.coords])
# print (ss)
t.getNode().set("points",ss)
# print (etree.tostring(t.getNode()))
def testshapely(self,Odoc):
for page in Odoc.lPages:
self.testscale(page.getAllNamedObjects(XMLDSTEXTClass))
traceln("page: %d" % page.getNumber())
# lTables = page.getAllNamedObjects(XMLDSTABLEClass)
# for table in lTables:
# table.testPopulate()
def run(self,doc):
"""
load dom and find rows
"""
# convert to DS format if needed
if self.bCreateRef:
if self.do2DS:
dsconv = primaAnalysis()
doc = dsconv.convert2DS(doc,self.docid)
refdoc = self.createRef(doc)
return refdoc
# single ref per page
# refdoc= self.createRefPerPage(doc)
# return None
if self.bCreateRefCluster:
if self.do2DS:
dsconv = primaAnalysis()
doc = dsconv.convert2DS(doc,self.docid)
# refdoc = self.createRefCluster(doc)
refdoc = self.createRefPartition(doc)
return refdoc
if self.do2DS:
dsconv = primaAnalysis()
self.doc = dsconv.convert2DS(doc,self.docid)
else:
self.doc= doc
self.ODoc = XMLDSDocument()
self.ODoc.loadFromDom(self.doc,listPages = range(self.firstPage,self.lastPage+1))
# self.testshapely(self.ODoc)
# # self.ODoc.loadFromDom(self.doc,listPages = range(30,31))
if self.bYCut:
self.processYCuts(self.ODoc)
else:
self.findRowsInDoc(self.ODoc)
return self.doc
def computeCoherenceScore(self,table):
"""
input: table with rows, BIESO-tagged textlines (currently BIO tags only)
output: coherence score (float)
percentage of textlines whose BIESO tag is coherent with the row segmentation
"""
coherenceScore = 0
nbTotalTextLines = 0
for row in table.getRows():
for cell in row.getCells():
nbTextLines = len(cell.getObjects())
nbTotalTextLines += nbTextLines
if nbTextLines == 1 and cell.getObjects()[0].getAttribute("DU_row") == self.STAG: coherenceScore+=1
else:
for ipos, textline in enumerate(cell.getObjects()):
if ipos == 0:
if textline.getAttribute("DU_row") in [self.BTAG]: coherenceScore += 1
else:
if textline.getAttribute("DU_row") in ['I']: coherenceScore += 1
# if ipos == nbTextLines-1:
# if textline.getAttribute("DU_row") in ['E']: coherenceScore += 1
# if ipos not in [0, nbTextLines-1]:
# if textline.getAttribute("DU_row") in ['I']: coherenceScore += 1
if nbTotalTextLines == 0: return 0
else : return coherenceScore /nbTotalTextLines
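# Toy illustration of the scoring rule above (assuming BTAG='B' and STAG='S'):
# a cell tagged ['B', 'I', 'I'] contributes 3 coherent textlines, a single-line
# cell tagged ['S'] contributes 1, and a cell tagged ['I', 'B'] contributes 0,
# giving (3 + 1 + 0) / 6 ~ 0.67.  Minimal re-computation on plain lists:
def _toy_coherence(lCellTags, BTAG='B', STAG='S'):
    ok = total = 0
    for tags in lCellTags:
        total += len(tags)
        if len(tags) == 1:
            ok += tags[0] == STAG
        else:
            ok += tags[0] == BTAG
            ok += sum(t == 'I' for t in tags[1:])
    return ok / total if total else 0
# _toy_coherence([['B', 'I', 'I'], ['S'], ['I', 'B']]) -> 0.666...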
################ TEST ##################
def testRun(self, filename, outFile=None):
"""
evaluate using ABP new table dataset with tablecell
"""
self.evalData=None
doc = self.loadDom(filename)
doc =self.run(doc)
if self.bEvalCluster:
self._evalData = self.createRunPartition( self.ODoc)
# self.evalData = self.createRefCluster(doc)
else:
self.evalData = self.createRef(doc)
if outFile: self.writeDom(doc)
return etree.tostring(self._evalData,encoding='unicode',pretty_print=True)
def testCluster(self, srefData, srunData, bVisual=False):
"""
<DOCUMENT>
<PAGE number="1" imageFilename="g" width="1457.52" height="1085.04">
<TABLE x="120.72" y="90.72" width="1240.08" height="923.28">
<ROW>
<TEXT id="line_1502076498510_2209"/>
<TEXT id="line_1502076500291_2210"/>
<TEXT id="line_1502076502635_2211"/>
<TEXT id="line_1502076505260_2212"/>
NEED to work at page level !!??
then average?
"""
cntOk = cntErr = cntMissed = 0
RefData = etree.XML(srefData.strip("\n").encode('utf-8'))
RunData = etree.XML(srunData.strip("\n").encode('utf-8'))
lPages = RefData.xpath('//%s' % ('PAGE[@number]'))
lRefKeys={}
dY = {}
lY={}
dIDMap={}
for page in lPages:
pnum=page.get('number')
key=page.get('pagekey')
dIDMap[key]={}
lY[key]=[]
dY[key]={}
xpath = ".//%s" % ("R")
lrows = page.xpath(xpath)
if len(lrows) > 0:
for i,row in enumerate(lrows):
xpath = ".//@id"
lids = row.xpath(xpath)
for id in lids:
# with spanning an element can belong to several rows?
if id not in dY[key]:
dY[key][id]=i
lY[key].append(i)
dIDMap[key][id]=len(lY[key])-1
try:lRefKeys[key].append((pnum,key,lids))
except KeyError:lRefKeys[key] = [(pnum,key,lids)]
rand_score = completeness = homogen_score = 0
if RunData is not None:
lpages = RunData.xpath('//%s' % ('PAGE[@number]'))
for page in lpages:
pnum=page.get('number')
key=page.get('pagekey')
if key in lRefKeys:
lX=[-1 for i in range(len(dIDMap[key]))]
xpath = ".//%s" % ("ROW")
lrows = page.xpath(xpath)
if len(lrows) > 0:
for i,row in enumerate(lrows):
xpath = ".//@id"
lids = row.xpath(xpath)
for id in lids:
lX[ dIDMap[key][id]] = i
#adjusted_rand_score(ref,run)
rand_score += adjusted_rand_score(lY[key],lX)
completeness += completeness_score(lY[key], lX)
homogen_score += homogeneity_score(lY[key], lX)
ltisRefsRunbErrbMiss= list()
return (rand_score/len(lRefKeys), completeness/len(lRefKeys), homogen_score/len(lRefKeys),ltisRefsRunbErrbMiss)
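# Quick reference for the three scores averaged above (all from sklearn.metrics):
# they compare two flat labelings of the same textline ids and are invariant to a
# renaming of the label values, so only the grouping matters.  Sketch with
# made-up labels:
def _toy_cluster_scores():
    from sklearn.metrics import adjusted_rand_score, completeness_score, homogeneity_score
    ref = [0, 0, 1, 1, 2, 2]
    return (adjusted_rand_score(ref, [5, 5, 7, 7, 9, 9]),  # 1.0: same partition, renamed
            homogeneity_score(ref, [0, 1, 2, 3, 4, 5]),    # 1.0: every cluster is "pure"
            completeness_score(ref, [0, 0, 0, 0, 0, 0]))   # 1.0: each row kept together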
def testGeometry(self, th, srefData, srunData, bVisual=False):
"""
compare geometrical zones (dtw + iou)
:param
returns tuple (cntOk, cntErr, cntMissed,ltisRefsRunbErrbMiss
"""
cntOk = cntErr = cntMissed = 0
ltisRefsRunbErrbMiss = list()
RefData = etree.XML(srefData.strip("\n").encode('utf-8'))
RunData = etree.XML(srunData.strip("\n").encode('utf-8'))
lPages = RefData.xpath('//%s' % ('PAGE[@number]'))
for ip,page in enumerate(lPages):
lY=[]
key=page.get('pagekey')
xpath = ".//%s" % ("ROW")
lrows = page.xpath(xpath)
if len(lrows) > 0:
for col in lrows:
xpath = ".//@points"
lpoints = col.xpath(xpath)
colgeo = cascaded_union([ Polygon(sPoints2tuplePoints(p)) for p in lpoints])
if lpoints != []:
lY.append(colgeo)
if RunData is not None:
lpages = RunData.xpath('//%s' % ('PAGE[@pagekey="%s"]' % key))
lX=[]
if lpages != []:
for page in lpages[0]:
xpath = ".//%s" % ("ROW")
lrows = page.xpath(xpath)
if len(lrows) > 0:
for col in lrows:
xpath = ".//@points"
lpoints = col.xpath(xpath)
if lpoints != []:
lX.append( Polygon(sPoints2tuplePoints(lpoints[0])))
lX = list(filter(lambda x:x.is_valid,lX))
ok , err , missed,lfound,lerr,lmissed = evalPartitions(lX, lY, th,iuo)
cntOk += ok
cntErr += err
cntMissed +=missed
[ltisRefsRunbErrbMiss.append((ip, y1.bounds, x1.bounds,False, False)) for (x1,y1) in lfound]
[ltisRefsRunbErrbMiss.append((ip, y1.bounds, None,False, True)) for y1 in lmissed]
[ltisRefsRunbErrbMiss.append((ip, None, x1.bounds,True, False)) for x1 in lerr]
# ltisRefsRunbErrbMiss.append(( lfound, ip, ok,err, missed))
# print (key, cntOk , cntErr , cntMissed)
return (cntOk , cntErr , cntMissed,ltisRefsRunbErrbMiss)
def testCluster2(self, th, srefData, srunData, bVisual=False):
"""
<DOCUMENT>
<PAGE number="1" imageFilename="g" width="1457.52" height="1085.04">
<TABLE x="120.72" y="90.72" width="1240.08" height="923.28">
<ROW>
<TEXT id="line_1502076498510_2209"/>
<TEXT id="line_1502076500291_2210"/>
<TEXT id="line_1502076502635_2211"/>
<TEXT id="line_1502076505260_2212"/>
NEED to work at page level !!??
then average?
"""
RefData = etree.XML(srefData.strip("\n").encode('utf-8'))
RunData = etree.XML(srunData.strip("\n").encode('utf-8'))
lPages = RefData.xpath('//%s' % ('PAGE[@number]'))
for page in lPages:
lY=[]
key=page.get('pagekey')
xpath = ".//%s" % ("ROW")
lrows = page.xpath(xpath)
if len(lrows) > 0:
for row in lrows:
xpath = ".//@id"
lid = row.xpath(xpath)
if lid != []:
lY.append(lid)
# print (row.xpath(xpath))
if RunData is not None:
lpages = RunData.xpath('//%s' % ('PAGE[@pagekey="%s"]' % key))
lX=[]
for page in lpages[:1]:
xpath = ".//%s" % ("ROW")
lrows = page.xpath(xpath)
if len(lrows) > 0:
for row in lrows:
xpath = ".//@id"
lid = row.xpath(xpath)
if lid != []:
lX.append( lid)
cntOk , cntErr , cntMissed,lf,le,lm = evalPartitions(lX, lY, th,jaccard)
# print ( cntOk , cntErr , cntMissed)
ltisRefsRunbErrbMiss= list()
return (cntOk , cntErr , cntMissed,ltisRefsRunbErrbMiss)
def overlapX(self,zone):
[a1,a2] = self.getX(),self.getX()+ self.getWidth()
[b1,b2] = zone.getX(),zone.getX()+ zone.getWidth()
return min(a2, b2) >= max(a1, b1)
def overlapY(self,zone):
[a1,a2] = self.getY(),self.getY() + self.getHeight()
[b1,b2] = zone.getY(),zone.getY() + zone.getHeight()
return min(a2, b2) >= max(a1, b1)
def signedRatioOverlap(self,z1,z2):
"""
overlap self and zone
return surface of self in zone
"""
[x1,y1,h1,w1] = z1.getX(),z1.getY(),z1.getHeight(),z1.getWidth()
[x2,y2,h2,w2] = z2.getX(),z2.getY(),z2.getHeight(),z2.getWidth()
fOverlap = 0.0
if self.overlapX(z2) and self.overlapY(z2):
[x11,y11,x12,y12] = [x1,y1,x1+w1,y1+h1]
[x21,y21,x22,y22] = [x2,y2,x2+w2,y2+h2]
s1 = w1 * h1
# possible ?
if s1 == 0: s1 = 1.0
#intersection
nx1 = max(x11,x21)
nx2 = min(x12,x22)
ny1 = max(y11,y21)
ny2 = min(y12,y22)
w = abs(nx2 - nx1)
h = abs(ny2 - ny1)
inter = h * w
if inter > 0 :
fOverlap = inter/s1
else:
# if overX and Y this is not possible !
fOverlap = 0.0
return fOverlap
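# Worked example for the ratio above: z1 = (x=0, y=0, h=10, w=10) and
# z2 = (x=5, y=0, h=10, w=10) intersect on a 5 x 10 box, so the returned value is
# 50 / 100 = 0.5, i.e. half of z1 is covered by z2.  Minimal re-computation with
# plain (x, y, w, h) tuples, independent of the XMLDS classes:
def _toy_ratio_overlap(b1, b2):
    x1, y1, w1, h1 = b1
    x2, y2, w2, h2 = b2
    iw = min(x1 + w1, x2 + w2) - max(x1, x2)
    ih = min(y1 + h1, y2 + h2) - max(y1, y2)
    return max(iw, 0) * max(ih, 0) / float(w1 * h1)
# _toy_ratio_overlap((0, 0, 10, 10), (5, 0, 10, 10)) == 0.5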
def findSignificantOverlap(self,TOverlap,ref,run):
"""
return True if ref and run lie on the same page and their overlap ratio is at least TOverlap
"""
pref,rowref= ref
prun, rowrun= run
if pref != prun: return False
return rowref.ratioOverlap(rowrun) >=TOverlap
def testCPOUM(self, TOverlap, srefData, srunData, bVisual=False):
"""
TOverlap: threshold used for comparing two surfaces
Correct Detections:
under and over segmentation?
"""
cntOk = cntErr = cntMissed = 0
RefData = etree.XML(srefData.strip("\n").encode('utf-8'))
RunData = etree.XML(srunData.strip("\n").encode('utf-8'))
# try:
# RunData = libxml2.parseMemory(srunData.strip("\n"), len(srunData.strip("\n")))
# except:
# RunData = None
# return (cntOk, cntErr, cntMissed)
lRun = []
if RunData is not None:
lpages = RunData.xpath('//%s' % ('PAGE'))
for page in lpages:
pnum=page.get('number')
#record level!
lRows = page.xpath(".//%s" % ("ROW"))
lORows = map(lambda x:XMLDSTABLEROWClass(0,x),lRows)
for row in lORows:
row.fromDom(row._domNode)
row.setIndex(row.getAttribute('id'))
lRun.append((pnum,row))
# print (lRun)
lRef = []
lPages = RefData.xpath('//%s' % ('PAGE'))
for page in lPages:
pnum=page.get('number')
lRows = page.xpath(".//%s" % ("ROW"))
lORows = map(lambda x:XMLDSTABLEROWClass(0,x),lRows)
for row in lORows:
row.fromDom(row._domNode)
row.setIndex(row.getAttribute('id'))
lRef.append((pnum,row))
refLen = len(lRef)
# bVisual = True
ltisRefsRunbErrbMiss= list()
lRefCovered = []
for i in range(0,len(lRun)):
iRef = 0
bFound = False
bErr , bMiss= False, False
runElt = lRun[i]
# print '\t\t===',runElt
while not bFound and iRef <= refLen - 1:
curRef = lRef[iRef]
if runElt and curRef not in lRefCovered and self.findSignificantOverlap(TOverlap,runElt, curRef):
bFound = True
lRefCovered.append(curRef)
iRef+=1
if bFound:
if bVisual:print("FOUND:", runElt, ' -- ', lRefCovered[-1])
cntOk += 1
else:
curRef=''
cntErr += 1
bErr = True
if bVisual:print("ERROR:", runElt)
if bFound or bErr:
ltisRefsRunbErrbMiss.append( (int(runElt[0]), curRef, runElt,bErr, bMiss) )
for i,curRef in enumerate(lRef):
if curRef not in lRefCovered:
if bVisual:print("MISSED:", curRef)
ltisRefsRunbErrbMiss.append( (int(curRef[0]), curRef, '',False, True) )
cntMissed+=1
ltisRefsRunbErrbMiss.sort(key=lambda xyztu:xyztu[0])
# print cntOk, cntErr, cntMissed,ltisRefsRunbErrbMiss
return (cntOk, cntErr, cntMissed,ltisRefsRunbErrbMiss)
def testCompare(self, srefData, srunData, bVisual=False):
"""
as in Shahab et al., DAS 2010
Correct Detections
Partial Detections
Over-Segmented
Under-Segmented
Missed
False Positive
"""
dicTestByTask = dict()
if self.bEvalCluster:
# dicTestByTask['CLUSTER']= self.testCluster(srefData,srunData,bVisual)
dicTestByTask['CLUSTER100']= self.testCluster2(1.0,srefData,srunData,bVisual)
dicTestByTask['CLUSTER90']= self.testCluster2(0.9,srefData,srunData,bVisual)
dicTestByTask['CLUSTER80']= self.testCluster2(0.8,srefData,srunData,bVisual)
# dicTestByTask['CLUSTER50']= self.testCluster2(0.5,srefData,srunData,bVisual)
else:
dicTestByTask['T80']= self.testGeometry(0.50,srefData,srunData,bVisual)
# dicTestByTask['T50']= self.testCPOUM(0.50,srefData,srunData,bVisual)
return dicTestByTask
def createRowsWithCuts2(self,table,lYCuts):
"""
input: lcells, horizontal lcuts
output: list of rows populated with appropriate cells (main overlap)
Algo: create cell chunks and determine (a, b) for the cut (a*X + b = Y)
does not solve everything ("russian mountains" in weddings)
"""
from tasks.TwoDChunking import TwoDChunking
if lYCuts == []:
return
#reinit rows
self._lrows = []
#build horizontal chunks
hchk = TwoDChunking()
hchk.HorizonalChunk(table.getPage(),tag=XMLDSTABLECELLClass)
# #get all texts
# lTexts = []
# [ lTexts.extend(colcell.getObjects()) for col in table.getColumns() for colcell in col.getObjects()]
# lTexts.sort(lambda x:x.getY())
#
# #initial Y: table top border
# prevCut = self.getY()
#
# # ycuts: features or float
# try:lYCuts = map(lambda x:x.getValue(),lYCuts)
# except:pass
#
# itext = 0
# irowIndex = 0
# lrowcells = []
# lprevrowcells = []
# prevRowCoherenceScore = 0
# for irow,cut in enumerate(lYCuts):
# yrow = prevCut
# y2 = cut
# h = cut - prevCut
# lrowcells =[]
# while lTexts[itext].getY() <= cut:
# lrowcells.append(lTexts[itext])
# itext += 1
# if lprevrowcells == []:
# pass
# else:
# # a new row: evaluate if this is better to create it or to merge ltext with current row
# # check coherence of new texts
# # assume columns!
# coherence = self.computeCoherenceScoreForRows(lrowcells)
# coherenceMerge = self.computeCoherenceScoreForRows(lrowcells+lprevrowcells)
# if prevRowCoherenceScore + coherence > coherenceMerge:
# cuthere
# else:
# merge
#
def createRowsWithCuts(self,lYCuts,table,tableNode,bTagDoc=False):
"""
REF XML
"""
prevCut = None
# prevCut = table.getY()
lYCuts.sort()
for index,cut in enumerate(lYCuts):
# the first cut corresponds to the table top: no row created for it
if prevCut is not None:
rowNode= etree.Element("ROW")
if bTagDoc:
tableNode.append(rowNode)
else:
tableNode.append(rowNode)
rowNode.set('y',str(prevCut))
rowNode.set('height',str(cut - prevCut))
rowNode.set('x',str(table.getX()))
rowNode.set('width',str(table.getWidth()))
rowNode.set('id',str(index-1))
prevCut= cut
#last
cut=table.getY2()
rowNode= etree.Element("ROW")
tableNode.append(rowNode)
rowNode.set('y',str(prevCut))
rowNode.set('height',str(cut - prevCut))
rowNode.set('x',str(table.getX()))
rowNode.set('width',str(table.getWidth()))
rowNode.set('id',str(index))
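# Minimal standalone sketch of the output produced above (not tied to the table
# object): consecutive Y cuts become ROW elements spanning the full table width,
# plus a final row down to the table bottom.  The table geometry is made up here.
def _toy_rows_from_cuts(lYCuts, tableX=0.0, tableWidth=100.0, tableY2=300.0):
    from lxml import etree
    rows = []
    cuts = sorted(lYCuts) + [tableY2]
    for index, (y, ynext) in enumerate(zip(cuts[:-1], cuts[1:])):
        node = etree.Element("ROW")
        node.set('y', str(y))
        node.set('height', str(ynext - y))
        node.set('x', str(tableX))
        node.set('width', str(tableWidth))
        node.set('id', str(index))
        rows.append(node)
    return rows
# _toy_rows_from_cuts([50, 120, 200]) -> three ROW nodes covering 50-120, 120-200, 200-300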
def createRefCluster(self,doc):
"""
Ref: a row = set of textlines
"""
self.ODoc = XMLDSDocument()
self.ODoc.loadFromDom(doc,listPages = range(self.firstPage,self.lastPage+1))
root=etree.Element("DOCUMENT")
refdoc=etree.ElementTree(root)
for page in self.ODoc.getPages():
pageNode = etree.Element('PAGE')
pageNode.set("number",page.getAttribute('number'))
pageNode.set("pagekey",os.path.basename(page.getAttribute('imageFilename')))
pageNode.set("width",page.getAttribute('width'))
pageNode.set("height",page.getAttribute('height'))
root.append(pageNode)
lTables = page.getAllNamedObjects(XMLDSTABLEClass)
for table in lTables:
dRows={}
tableNode = etree.Element('TABLE')
tableNode.set("x",table.getAttribute('x'))
tableNode.set("y",table.getAttribute('y'))
tableNode.set("width",table.getAttribute('width'))
tableNode.set("height",table.getAttribute('height'))
pageNode.append(tableNode)
for cell in table.getAllNamedObjects(XMLDSTABLECELLClass):
try:dRows[int(cell.getAttribute("row"))].extend(cell.getObjects())
except KeyError:dRows[int(cell.getAttribute("row"))] = cell.getObjects()
for rowid in sorted(dRows.keys()):
rowNode= etree.Element("ROW")
tableNode.append(rowNode)
for elt in dRows[rowid]:
txtNode = etree.Element("TEXT")
txtNode.set('id',elt.getAttribute('id'))
rowNode.append(txtNode)
return refdoc
def createRef(self,doc):
"""
create a ref file from the xml one
"""
self.ODoc = XMLDSDocument()
self.ODoc.loadFromDom(doc,listPages = range(self.firstPage,self.lastPage+1))
root=etree.Element("DOCUMENT")
refdoc=etree.ElementTree(root)
for page in self.ODoc.getPages():
#imageFilename="..\col\30275\S_Freyung_021_0001.jpg" width="977.52" height="780.0">
pageNode = etree.Element('PAGE')
pageNode.set("number",page.getAttribute('number'))
pageNode.set("pagekey",os.path.basename(page.getAttribute('imageFilename')))
pageNode.set("width",str(page.getAttribute('width')))
pageNode.set("height",str(page.getAttribute('height')))
root.append(pageNode)
lTables = page.getAllNamedObjects(XMLDSTABLEClass)
for table in lTables:
print (table)
dRows={}
tableNode = etree.Element('TABLE')
tableNode.set("x",str(table.getAttribute('x')))
tableNode.set("y",str(table.getAttribute('y')))
tableNode.set("width",str(table.getAttribute('width')))
tableNode.set("height",str(table.getAttribute('height')))
for cell in table.getAllNamedObjects(XMLDSTABLECELLClass):
print (cell)
try:dRows[int(cell.getAttribute("row"))].append(cell)
except KeyError:dRows[int(cell.getAttribute("row"))] = [cell]
lYcuts = []
for rowid in sorted(dRows.keys()):
# print rowid, min(map(lambda x:x.getY(),dRows[rowid]))
lYcuts.append(min(list(map(lambda x:x.getY(),dRows[rowid]))))
if lYcuts != []:
pageNode.append(tableNode)
self.createRowsWithCuts(lYcuts,table,tableNode)
return refdoc
def createRefPerPage(self,doc):
"""
create a ref file from the xml one
for DAS 2018
"""
self.ODoc = XMLDSDocument()
self.ODoc.loadFromDom(doc,listPages = range(self.firstPage,self.lastPage+1))
dRows={}
for page in self.ODoc.getPages():
#imageFilename="..\col\30275\S_Freyung_021_0001.jpg" width="977.52" height="780.0">
pageNode = etree.Element('PAGE')
# pageNode.set("number",page.getAttribute('number'))
#SINGLER PAGE pnum=1
pageNode.set("number",'1')
pageNode.set("imageFilename",page.getAttribute('imageFilename'))
pageNode.set("width",page.getAttribute('width'))
pageNode.set("height",page.getAttribute('height'))
root=etree.Element("DOCUMENT")
refdoc=etree.ElementTree(root)
root.append(pageNode)
lTables = page.getAllNamedObjects(XMLDSTABLEClass)
for table in lTables:
tableNode = etree.Element('TABLE')
tableNode.set("x",table.getAttribute('x'))
tableNode.set("y",table.getAttribute('y'))
tableNode.set("width",table.getAttribute('width'))
tableNode.set("height",table.getAttribute('height'))
pageNode.append(tableNode)
for cell in table.getAllNamedObjects(XMLDSTABLECELLClass):
try:dRows[int(cell.getAttribute("row"))].append(cell)
except KeyError:dRows[int(cell.getAttribute("row"))] = [cell]
lYcuts = []
for rowid in sorted(dRows.keys()):
# print rowid, min(map(lambda x:x.getY(),dRows[rowid]))
lYcuts.append(min(list(map(lambda x:x.getY(),dRows[rowid]))))
self.createRowsWithCuts(lYcuts,table,tableNode)
self.outputFileName = os.path.basename(page.getAttribute('imageFilename')[:-3]+'ref')
# print(self.outputFileName)
self.writeDom(refdoc, bIndent=True)
return refdoc
# print refdoc.serialize('utf-8', True)
# self.testCPOUM(0.5,refdoc.serialize('utf-8', True),refdoc.serialize('utf-8', True))
def createRefPartition(self,doc):
"""
Ref: a row = set of textlines
:param doc: doc xml
returns a doc (ref format): each row contains a set of ids (textline ids)
"""
self.ODoc = XMLDSDocument()
self.ODoc.loadFromDom(doc,listPages = range(self.firstPage,self.lastPage+1))
root=etree.Element("DOCUMENT")
refdoc=etree.ElementTree(root)
for page in self.ODoc.getPages():
pageNode = etree.Element('PAGE')
pageNode.set("number",page.getAttribute('number'))
pageNode.set("pagekey",os.path.basename(page.getAttribute('imageFilename')))
pageNode.set("width",str(page.getAttribute('width')))
pageNode.set("height",str(page.getAttribute('height')))
root.append(pageNode)
lTables = page.getAllNamedObjects(XMLDSTABLEClass)
for table in lTables:
dCols={}
tableNode = etree.Element('TABLE')
tableNode.set("x",table.getAttribute('x'))
tableNode.set("y",table.getAttribute('y'))
tableNode.set("width",str(table.getAttribute('width')))
tableNode.set("height",str(table.getAttribute('height')))
pageNode.append(tableNode)
for cell in table.getAllNamedObjects(XMLDSTABLECELLClass):
try:dCols[int(cell.getAttribute("row"))].extend(cell.getObjects())
except KeyError:dCols[int(cell.getAttribute("row"))] = cell.getObjects()
for rowid in sorted(dCols.keys()):
rowNode= etree.Element("ROW")
tableNode.append(rowNode)
for elt in dCols[rowid]:
txtNode = etree.Element("TEXT")
txtNode.set('id',elt.getAttribute('id'))
rowNode.append(txtNode)
return refdoc
def createRunPartition(self,doc):
"""
Ref: a row = set of textlines
:param doc: doc xml
returns a doc (ref format): each row contains a set of ids (textline ids)
"""
# self.ODoc = doc #XMLDSDocument()
# self.ODoc.loadFromDom(doc,listPages = range(self.firstPage,self.lastPage+1))
root=etree.Element("DOCUMENT")
refdoc=etree.ElementTree(root)
for page in self.ODoc.getPages():
pageNode = etree.Element('PAGE')
pageNode.set("number",page.getAttribute('number'))
pageNode.set("pagekey",os.path.basename(page.getAttribute('imageFilename')))
pageNode.set("width",str(page.getAttribute('width')))
pageNode.set("height",str(page.getAttribute('height')))
root.append(pageNode)
tableNode = etree.Element('TABLE')
tableNode.set("x","0")
tableNode.set("y","0")
tableNode.set("width","0")
tableNode.set("height","0")
pageNode.append(tableNode)
table = page.getAllNamedObjects(XMLDSTABLEClass)[0]
lRows = table.getRows()
for row in lRows:
cNode= etree.Element("ROW")
tableNode.append(cNode)
for elt in row.getAllNamedObjects(XMLDSTEXTClass):
txtNode= etree.Element("TEXT")
txtNode.set('id',elt.getAttribute('id'))
cNode.append(txtNode)
return refdoc
if __name__ == "__main__":
rdc = RowDetection()
#prepare for the parsing of the command line
rdc.createCommandLineParser()
# rdc.add_option("--coldir", dest="coldir", action="store", type="string", help="collection folder")
rdc.add_option("--docid", dest="docid", action="store", type="string", help="document id")
rdc.add_option("--dsconv", dest="dsconv", action="store_true", default=False, help="convert page format to DS")
rdc.add_option("--createref", dest="createref", action="store_true", default=False, help="create REF file for component")
rdc.add_option("--createrefC", dest="createrefCluster", action="store_true", default=False, help="create REF file for component (cluster of textlines)")
rdc.add_option("--evalC", dest="evalCluster", action="store_true", default=False, help="evaluation using clusters (of textlines)")
rdc.add_option("--cell", dest="bCellOnly", action="store_true", default=False, help="generate cell candidate from BIO (no row)")
rdc.add_option("--nocolumn", dest="bNoColumn", action="store_true", default=False, help="no existing table/colunm)")
# rdc.add_option("--raw", dest="bRaw", action="store_true", default=False, help="no existing table/colunm)")
rdc.add_option("--YC", dest="YCut", action="store_true", default=False, help="use Ycut")
rdc.add_option("--BTAG", dest="BTAG", action="store", default='B',type="string", help="BTAG = B or S")
rdc.add_option("--STAG", dest="STAG", action="store", default='S',type="string", help="STAG = S or None")
rdc.add_option("--thhighsupport", dest="thhighsupport", action="store", type="int", default=33,help="TH for high support", metavar="NN")
rdc.add_option('-f',"--first", dest="first", action="store", type="int", help="first page to be processed")
rdc.add_option('-l',"--last", dest="last", action="store", type="int", help="last page to be processed")
#parse the command line
dParams, args = rdc.parseCommandLine()
#Now we are back to the normal programmatic mode, we set the component parameters
rdc.setParams(dParams)
doc = rdc.loadDom()
doc = rdc.run(doc)
if doc is not None and rdc.getOutputFileName() != '-':
rdc.writeDom(doc, bIndent=True)
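# Hypothetical invocation (the script name is a placeholder, and the input/output
# options come from the base command-line parser created above, not shown here;
# the remaining flags are the ones registered in this block):
#
#   python RowDetection.py --docid 12345 --YC -f 1 -l 2
#   python RowDetection.py --docid 12345 --createref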
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/mplot3d/text3d.py | 1 | 1226 | '''
======================
Text annotations in 3D
======================
Demonstrates the placement of text annotations on a 3D plot.
Functionality shown:
- Using the text function with three types of 'zdir' values: None, an axis
name (ex. 'x'), or a direction tuple (ex. (1, 1, 0)).
- Using the text function with the color keyword.
- Using the text2D function to place text on a fixed position on the ax
object.
'''
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.gca(projection='3d')
# Demo 1: zdir
zdirs = (None, 'x', 'y', 'z', (1, 1, 0), (1, 1, 1))
xs = (1, 4, 4, 9, 4, 1)
ys = (2, 5, 8, 10, 1, 2)
zs = (10, 3, 8, 9, 1, 8)
for zdir, x, y, z in zip(zdirs, xs, ys, zs):
label = '(%d, %d, %d), dir=%s' % (x, y, z, zdir)
ax.text(x, y, z, label, zdir)
# Demo 2: color
ax.text(9, 0, 0, "red", color='red')
# Demo 3: text2D
# Placement 0, 0 would be the bottom left, 1, 1 would be the top right.
ax.text2D(0.05, 0.95, "2D Text", transform=ax.transAxes)
# Tweaking display region and labels
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)
ax.set_zlim(0, 10)
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
plt.show()
| mit |
befelix/GPy | GPy/plotting/matplot_dep/base_plots.py | 5 | 8394 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from matplotlib import pyplot as plt
import numpy as np
def ax_default(fignum, ax):
if ax is None:
fig = plt.figure(fignum)
ax = fig.add_subplot(111)
else:
fig = ax.figure
return fig, ax
def meanplot(x, mu, color='#3300FF', ax=None, fignum=None, linewidth=2,**kw):
_, axes = ax_default(fignum, ax)
return axes.plot(x,mu,color=color,linewidth=linewidth,**kw)
def gpplot(x, mu, lower, upper, edgecol='#3300FF', fillcol='#33CCFF', ax=None, fignum=None, **kwargs):
_, axes = ax_default(fignum, ax)
mu = mu.flatten()
x = x.flatten()
lower = lower.flatten()
upper = upper.flatten()
plots = []
#here's the mean
plots.append(meanplot(x, mu, edgecol, axes))
#here's the box
kwargs['linewidth']=0.5
if not 'alpha' in kwargs.keys():
kwargs['alpha'] = 0.3
plots.append(axes.fill(np.hstack((x,x[::-1])),np.hstack((upper,lower[::-1])),color=fillcol,**kwargs))
#this is the edge:
plots.append(meanplot(x, upper,color=edgecol, linewidth=0.2, ax=axes))
plots.append(meanplot(x, lower,color=edgecol, linewidth=0.2, ax=axes))
return plots
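# Usage sketch for gpplot (not part of the original module; relies only on the
# numpy/pyplot imports above): a made-up posterior mean with a +/- 2 sd band,
# drawn as a mean line plus a shaded confidence region.
def _example_gpplot():
    x = np.linspace(0, 10, 100)
    mu = np.sin(x)
    sd = 0.2 * np.ones_like(x)
    gpplot(x, mu, mu - 2 * sd, mu + 2 * sd)
    plt.show()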
def gradient_fill(x, percentiles, ax=None, fignum=None, **kwargs):
_, ax = ax_default(fignum, ax)
plots = []
#here's the box
if 'linewidth' not in kwargs:
kwargs['linewidth'] = 0.5
if not 'alpha' in kwargs.keys():
kwargs['alpha'] = 1./(len(percentiles))
# pop where from kwargs
where = kwargs.pop('where') if 'where' in kwargs else None
# pop interpolate, which we actually do not do here!
if 'interpolate' in kwargs: kwargs.pop('interpolate')
def pairwise(inlist):
l = len(inlist)
for i in range(int(np.ceil(l/2.))):
yield inlist[:][i], inlist[:][(l-1)-i]
polycol = []
for y1, y2 in pairwise(percentiles):
import matplotlib.mlab as mlab
# Handle united data, such as dates
ax._process_unit_info(xdata=x, ydata=y1)
ax._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
from numpy import ma
x = ma.masked_invalid(ax.convert_xunits(x))
y1 = ma.masked_invalid(ax.convert_yunits(y1))
y2 = ma.masked_invalid(ax.convert_yunits(y2))
if y1.ndim == 0:
y1 = np.ones_like(x) * y1
if y2.ndim == 0:
y2 = np.ones_like(x) * y2
if where is None:
where = np.ones(len(x), np.bool)
else:
where = np.asarray(where, np.bool)
if not (x.shape == y1.shape == y2.shape == where.shape):
raise ValueError("Argument dimensions are incompatible")
from functools import reduce  # reduce is not a builtin on Python 3
mask = reduce(ma.mask_or, [ma.getmask(a) for a in (x, y1, y2)])
if mask is not ma.nomask:
where &= ~mask
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2 * N + 2, 2), np.float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
start = xslice[0], y2slice[0]
end = xslice[-1], y2slice[-1]
X[0] = start
X[N + 1] = end
X[1:N + 1, 0] = xslice
X[1:N + 1, 1] = y1slice
X[N + 2:, 0] = xslice[::-1]
X[N + 2:, 1] = y2slice[::-1]
polys.append(X)
polycol.extend(polys)
from matplotlib.collections import PolyCollection
plots.append(PolyCollection(polycol, **kwargs))
ax.add_collection(plots[-1], autolim=True)
ax.autoscale_view()
return plots
def gperrors(x, mu, lower, upper, edgecol=None, ax=None, fignum=None, **kwargs):
_, axes = ax_default(fignum, ax)
mu = mu.flatten()
x = x.flatten()
lower = lower.flatten()
upper = upper.flatten()
plots = []
if edgecol is None:
edgecol='#3300FF'
if not 'alpha' in kwargs.keys():
kwargs['alpha'] = 1.
if not 'lw' in kwargs.keys():
kwargs['lw'] = 1.
plots.append(axes.errorbar(x,mu,yerr=np.vstack([mu-lower,upper-mu]),color=edgecol,**kwargs))
plots[-1][0].remove()
return plots
def removeRightTicks(ax=None):
ax = ax or plt.gca()
for i, line in enumerate(ax.get_yticklines()):
if i%2 == 1: # odd indices
line.set_visible(False)
def removeUpperTicks(ax=None):
ax = ax or plt.gca()
for i, line in enumerate(ax.get_xticklines()):
if i%2 == 1: # odd indices
line.set_visible(False)
def fewerXticks(ax=None,divideby=2):
ax = ax or plt.gca()
ax.set_xticks(ax.get_xticks()[::divideby])
def align_subplots(N,M,xlim=None, ylim=None):
"""make all of the subplots have the same limits, turn off unnecessary ticks"""
#find sensible xlim,ylim
if xlim is None:
xlim = [np.inf,-np.inf]
for i in range(N*M):
plt.subplot(N,M,i+1)
xlim[0] = min(xlim[0],plt.xlim()[0])
xlim[1] = max(xlim[1],plt.xlim()[1])
if ylim is None:
ylim = [np.inf,-np.inf]
for i in range(N*M):
plt.subplot(N,M,i+1)
ylim[0] = min(ylim[0],plt.ylim()[0])
ylim[1] = max(ylim[1],plt.ylim()[1])
for i in range(N*M):
plt.subplot(N,M,i+1)
plt.xlim(xlim)
plt.ylim(ylim)
if (i)%M:
plt.yticks([])
else:
removeRightTicks()
if i<(M*(N-1)):
plt.xticks([])
else:
removeUpperTicks()
def align_subplot_array(axes,xlim=None, ylim=None):
"""
Make all of the axes in the array have the same limits, turn off unnecessary ticks
use plt.subplots() to get an array of axes
"""
#find sensible xlim,ylim
if xlim is None:
xlim = [np.inf,-np.inf]
for ax in axes.flatten():
xlim[0] = min(xlim[0],ax.get_xlim()[0])
xlim[1] = max(xlim[1],ax.get_xlim()[1])
if ylim is None:
ylim = [np.inf,-np.inf]
for ax in axes.flatten():
ylim[0] = min(ylim[0],ax.get_ylim()[0])
ylim[1] = max(ylim[1],ax.get_ylim()[1])
N,M = axes.shape
for i,ax in enumerate(axes.flatten()):
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if (i)%M:
ax.set_yticks([])
else:
removeRightTicks(ax)
if i<(M*(N-1)):
ax.set_xticks([])
else:
removeUpperTicks(ax)
def x_frame1D(X,plot_limits=None,resolution=None):
"""
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
"""
assert X.shape[1] ==1, "x_frame1D is defined for one-dimensional inputs"
if plot_limits is None:
from ...core.parameterization.variational import VariationalPosterior
if isinstance(X, VariationalPosterior):
xmin,xmax = X.mean.min(0),X.mean.max(0)
else:
xmin,xmax = X.min(0),X.max(0)
xmin, xmax = xmin-0.2*(xmax-xmin), xmax+0.2*(xmax-xmin)
elif len(plot_limits)==2:
xmin, xmax = plot_limits
else:
raise ValueError("Bad limits for plotting")
Xnew = np.linspace(xmin,xmax,resolution or 200)[:,None]
return Xnew, xmin, xmax
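# Usage sketch for x_frame1D (made-up inputs): the observed range of X is widened
# by 20% on each side and a dense grid is returned for plotting a GP over it.
#
#   X = np.linspace(1., 3., 20)[:, None]
#   Xnew, xmin, xmax = x_frame1D(X, resolution=200)
#   # xmin ~ 0.6, xmax ~ 3.4, and Xnew is the dense grid between them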
def x_frame2D(X,plot_limits=None,resolution=None):
"""
Internal helper function for making plots, returns a set of input values to plot as well as lower and upper limits
"""
assert X.shape[1] ==2, "x_frame2D is defined for two-dimensional inputs"
if plot_limits is None:
xmin,xmax = X.min(0),X.max(0)
xmin, xmax = xmin-0.2*(xmax-xmin), xmax+0.2*(xmax-xmin)
elif len(plot_limits)==2:
xmin, xmax = plot_limits
else:
raise ValueError("Bad limits for plotting")
resolution = resolution or 50
xx,yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]
Xnew = np.vstack((xx.flatten(),yy.flatten())).T
return Xnew, xx, yy, xmin, xmax
| bsd-3-clause |
wscullin/spack | var/spack/repos/builtin/packages/py-phonopy/package.py | 3 | 1832 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPhonopy(PythonPackage):
"""Phonopy is an open source package for phonon
calculations at harmonic and quasi-harmonic levels."""
homepage = "http://atztogo.github.io/phonopy/index.html"
url = "http://sourceforge.net/projects/phonopy/files/phonopy/phonopy-1.10/phonopy-1.10.0.tar.gz"
version('1.10.0', '973ed1bcea46e21b9bf747aab9061ff6')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
| lgpl-2.1 |
sonnyhu/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
wilmerhenao/Tomotherapy-Without-Pulse | SinogramComparisons.py | 1 | 5247 | __author__ = 'wilmer'
# This one corresponds to the AverageOpeningTime.pdf document (first model)
try:
import mkl
have_mkl = True
print("Running with MKL Acceleration")
except ImportError:
have_mkl = False
print("Running with normal backends")
import pickle
import time
import socket
import numpy as np
import matplotlib.pyplot as plt
from pylab import Line2D, gca
from scipy.stats import describe
from gurobipy import *
import math
from itertools import product
import pylab as pl
from matplotlib import collections as mc
import itertools
def plotSinogramIndependent(t, L, nameChunk, outputDirectory):
plt.figure()
ax = gca()
lines = []
for l in range(L):
for aperture in range(len(t[l])):
a, b = t[l][aperture]
lines.append([(a, l), (b, l)])
lc = mc.LineCollection(lines, linewidths = 3, colors = 'blue')
fig, ax = pl.subplots()
ax.add_collection(lc)
ax.autoscale()
plt.title('Sinogram')
plt.xlabel('time in seconds')
plt.ylabel('leaves')
plt.savefig(outputDirectory + 'SinogramIndependent' + nameChunk + '.png')
nameoutputdirectory = 'outputMultiProj/'
#nameChunk1 = 'pickleresults-ProstatefullModel-MinLOT-0.03-minAvgLot-0.17-vxls-8340-ntnsty-700'
#nameChunk1 = 'pickleresults-ProstatefullModel-MinLOT-0.03-minAvgLot-0.17-vxls-8340-ntnsty-700'
#nameChunk2 = 'pickleresults-ProstatepairModel-MinLOT-0.03-minAvgLot-0.17-vxls-16677-ntnsty-700'
#nameChunk1 = 'pickleresults-Prostate-51-pairModel-MinLOT-0.02-minAvgLot-0.17-vxls-16677-ntnsty-700'
#nameChunk2 = 'pickleresults-Prostate-51-fullModel-MinLOT-0.02-minAvgLot-0.17-vxls-16677-ntnsty-700'
nameChunk1 = 'pickleresults-Prostate-51-fullModel-MinLOT-0.02-minAvgLot-0.17-vxls-1385-ntnsty-700'
nameChunk2 = 'pickleresults-Prostate-51-fullModel-MinLOT-0.02-minAvgLot-0.17-vxls-16677-ntnsty-700'
picklefile1 = nameoutputdirectory + nameChunk1 + '.pkl'
picklefile2 = nameoutputdirectory + nameChunk2 + '.pkl'
input = open(picklefile1, 'rb')
sData1 = pickle.load(input)
input = open(picklefile2, 'rb')
sData2 = pickle.load(input)
t1 = sData1['t']
t2 = sData2['t']
L = 64
#plotSinogramIndependent(t1, L, nameChunk1, nameoutputdirectory)
#plotSinogramIndependent(t2, L, nameChunk2, nameoutputdirectory)
#bothOn = [[max(first[0], second[0]), min(first[1], second[1])] for first in t1 for second in t2 if max(first[0], second[0]) <= min(first[1], second[1])]
myeps = 0.001
def range_diff(r1, r2):
s1, e1 = r1
s2, e2 = r2
endpoints = sorted((s1, s2, e1, e2))
result = []
if endpoints[0] == s1 and (endpoints[1] - endpoints[0]) > myeps:
result.append((endpoints[0], endpoints[1]))
if endpoints[3] == e1 and (endpoints[3] - endpoints[2]) > myeps:
result.append((endpoints[2], endpoints[3]))
return result
def multirange_diff(r1_list, r2_list):
for r2 in r2_list:
r1_list = list(itertools.chain(*[range_diff(r1, r2) for r1 in r1_list]))
return r1_list
r1_list = [(1, 1001), (1100, 1201)]
r2_list = [(30, 51), (60, 201), (1150, 1301)]
print(multirange_diff(r1_list, r2_list))
firstOnly = []
secondOnly = []
bothOn = []
for l in range(L):
firstOnly.append(multirange_diff(t1[l], t2[l]))
secondOnly.append(multirange_diff(t2[l], t1[l]))
if len(t1[l]) > 0 and len(t2[l]) > 0:
bothOn.append([[max(first[0], second[0]), min(first[1], second[1])] for first in t1[l] for second in t2[l] if max(first[0], second[0]) <= min(first[1], second[1])])
else:
bothOn.append([])
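# The comprehension above keeps, for each leaf, the time intervals during which
# BOTH plans have the leaf open: the intersection of [a1, b1] and [a2, b2] is
# [max(a1, a2), min(b1, b2)], kept only when it is non-empty.  For example the
# openings (1.0, 2.0) and (1.5, 3.0) overlap on (1.5, 2.0), while (1.0, 2.0) and
# (2.5, 3.0) contribute nothing.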
def plotSinogramIndependentMixed(firstOnly, secondOnly, middleOnly, L, nameChunk1, nameChunk2, outputDirectory):
plt.figure()
ax = gca()
linesFirst = []
linesSecond = []
linesMiddle = []
for l in range(L):
for aperture in range(len(firstOnly[l])):
a, b = firstOnly[l][aperture]
linesFirst.append([(a, l), (b, l)])
for aperture in range(len(secondOnly[l])):
a, b = secondOnly[l][aperture]
linesSecond.append([(a, l), (b, l)])
for aperture in range(len(middleOnly[l])):
a, b = middleOnly[l][aperture]
linesMiddle.append([(a, l), (b, l)])
lc = mc.LineCollection(linesFirst, linewidths = 3, colors = 'red')
rc = mc.LineCollection(linesSecond, linewidths = 3, colors = 'blue')
middlec = mc.LineCollection(linesMiddle, linewidths = 3, colors = 'purple')
fig, ax = pl.subplots()
ax.add_collection(lc)
ax.add_collection(rc)
ax.add_collection(middlec)
ax.autoscale()
#plt.title('Sinogram Comparison of Odd-Even Model vs. Detailed Model')
plt.title('Sinograms of Low Resolution Model (red) vs. Full Resolution Model (blue)')
plt.xlabel('time in seconds')
plt.ylabel('leaves')
plt.tick_params(
axis='y', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
left=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=True) #
ax.set_yticklabels([])
plt.savefig(outputDirectory + 'Sinogram-Comparison-FullModelvspairModel.pdf', format = 'pdf')
plotSinogramIndependentMixed(firstOnly, secondOnly, bothOn, L, nameChunk1, nameChunk2, nameoutputdirectory) | mit |
appapantula/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
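# Small sanity-check sketch (not part of the original example): the projection is
# a linear map from an l*l pixel image to n_dir projections of l bins each, so
# applying the operator to a sum of images gives the sum of their projections.
def _check_operator_linearity(l=32, n_dir=8):
    op = build_projection_operator(l, n_dir)            # sparse, roughly (n_dir*l, l*l)
    a = np.random.rand(l * l)
    b = np.random.rand(l * l)
    assert np.allclose(op * (a + b), op * a + op * b)   # linearity
    return op.shape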
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36  # number of seed points; an int keeps rand() and indexing happy
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l // 7)  # integer number of projection angles
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
florian-f/sklearn | sklearn/manifold/locally_linear.py | 3 | 24871 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD, (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, check_random_state, check_arrays
from ..utils.arpack import eigsh
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = np.asarray(X)
Z = np.asarray(Z)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
if X.dtype.kind == 'i':
X = X.astype(np.float)
if Z.dtype.kind == 'i':
Z = Z.astype(np.float)
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
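# Usage sketch (made-up numbers, not part of the original module): reconstruct
# each X[i] as an affine combination of its neighbours Z[i]; the returned weights
# sum to one along each row.
def _example_barycenter_weights():
    X = np.array([[0., 0.]])                            # one sample
    Z = np.array([[[1., 0.], [-1., 0.], [0., 2.]]])     # its 3 neighbours
    B = barycenter_weights(X, Z)                        # shape (1, 3)
    assert np.allclose(B.sum(axis=1), 1.)
    return B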
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, cKDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
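# Usage sketch (random data): one row per sample, holding the barycenter weights
# of its n_neighbors nearest neighbours; every row sums to one.
def _example_barycenter_graph():
    X = np.random.rand(20, 3)
    W = barycenter_kneighbors_graph(X, n_neighbors=5)
    assert W.shape == (20, 20)
    assert np.allclose(W.sum(axis=1), 1.)
    return W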
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, cKDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add the identity: M = (W - I)' (W - I)
elif method == 'hessian':
        dp = n_components * (n_components + 1) // 2  # integer: used as an array dimension below
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
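# A minimal usage sketch added for illustration (not from the original
# sklearn source); assumes sklearn.datasets.make_s_curve is available.
def _demo_locally_linear_embedding():
    from sklearn.datasets import make_s_curve
    X_demo, _ = make_s_curve(n_samples=300, random_state=0)
    Y_demo, err = locally_linear_embedding(
        X_demo, n_neighbors=10, n_components=2, method='standard',
        random_state=0)
    # Y_demo has shape (300, 2); err is the reconstruction error
    return Y_demo, err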
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
    `embedding_` : array-like, shape [n_samples, n_components]
        Stores the embedding vectors
    `reconstruction_error_` : float
        Reconstruction error associated with `embedding_`
`nbrs_` : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X, = check_arrays(X, sparse_format='dense')
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
X = array2d(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
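# A minimal usage sketch of the estimator above, added for illustration
# (not from the original sklearn source); variable names are hypothetical.
def _demo_locally_linear_embedding_estimator():
    from sklearn.datasets import make_s_curve
    X_demo, _ = make_s_curve(n_samples=300, random_state=0)
    lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2,
                                 method='modified', random_state=0)
    X_embedded = lle.fit_transform(X_demo)
    # out-of-sample points can be mapped with transform()
    X_new = lle.transform(X_demo[:5])
    return X_embedded, X_new, lle.reconstruction_error_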
| bsd-3-clause |
equialgo/scikit-learn | benchmarks/bench_plot_omp_lars.py | 72 | 4514 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import six
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
            n_informative = n_features // 10  # keep an integer for n_nonzero_coefs
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import matplotlib.pyplot as plt
fig = plt.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(six.iteritems(results))):
ax = fig.add_subplot(1, 2, i+1)
vmax = max(1 - timings.min(), -1 + timings.max())
plt.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + [str(each) for each in samples_range])
ax.set_yticklabels([''] + [str(each) for each in features_range])
plt.xlabel('n_samples')
plt.ylabel('n_features')
plt.title(label)
plt.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = plt.axes([0.1, 0.08, 0.8, 0.06])
plt.colorbar(cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
bikong2/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
    # checks that the verbose output is the same
    # for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
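# A minimal usage sketch added for illustration (not from the original
# sklearn source): a non-zero threshold drops the near-constant column.
def _demo_variance_threshold():
    X_demo = np.array([[0.00, 1., 10.],
                       [0.01, 2., 20.],
                       [0.00, 3., 30.]])
    selector = VarianceThreshold(threshold=0.1)
    X_reduced = selector.fit_transform(X_demo)
    # only the last two columns have variance above 0.1
    return X_reduced, selector.variances_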
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/backends/tkagg.py | 6 | 1063 | from __future__ import print_function
from matplotlib.backends import _tkagg
import Tkinter as Tk
def blit(photoimage, aggimage, bbox=None, colormode=1):
tk = photoimage.tk
if bbox is not None:
bbox_array = bbox.__array__()
else:
bbox_array = None
try:
tk.call("PyAggImagePhoto", photoimage, id(aggimage), colormode, id(bbox_array))
except Tk.TclError:
try:
try:
_tkagg.tkinit(tk.interpaddr(), 1)
except AttributeError:
_tkagg.tkinit(id(tk), 0)
tk.call("PyAggImagePhoto", photoimage, id(aggimage), colormode, id(bbox_array))
except (ImportError, AttributeError, Tk.TclError):
raise
def test(aggimage):
import time
r = Tk.Tk()
c = Tk.Canvas(r, width=aggimage.width, height=aggimage.height)
c.pack()
p = Tk.PhotoImage(width=aggimage.width, height=aggimage.height)
blit(p, aggimage)
c.create_image(aggimage.width,aggimage.height,image=p)
blit(p, aggimage)
while 1: r.update_idletasks()
| mit |
blancha/abcngspipelines | sra/getsra.py | 1 | 2427 | #!/usr/bin/env python3
# Author Alexis Blanchet-Cohen
# Date: 09/06/2014
import argparse
import os
import os.path
import pandas
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description="Generates scripts to download SRA files.")
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory. DEFAULT=.", default=".")
parser.add_argument("-i", "--samplesFile", help="Input file with names of SRA runs. DEFAULT=.", default="./SraRunTable.txt")
parser.add_argument("-o", "--outputDirectory", help="Output directory with SRA files. DEFAULT=.", default=".")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
# If not in the main scripts directory, cd to the main scripts directory, if it exists.
#util.cdMainScriptsDirectory()
# Process the command line arguments.
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
samplesFile = os.path.abspath(args.samplesFile)
outputDirectory = os.path.abspath(args.outputDirectory)
# Check if the samplesFile exists, and is a file.
if not(os.path.exists(samplesFile) and os.path.isfile(samplesFile)):
exit(samplesFile + " does not exist or is not a file.")
# Read configuration files
config = util.readConfigurationFiles()
header = config.getboolean("server", "PBS_header")
# Read input file.
samplesFile = pandas.read_csv(samplesFile, sep="\t")
# Create scripts directory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory):
os.mkdir(scriptsDirectory)
os.chdir(scriptsDirectory)
# Create output directory, if it does not exist yet.
if not os.path.exists(outputDirectory):
os.makedirs(outputDirectory)
# Cycle through all the samples and write the star scripts.
for index, row in samplesFile.iterrows():
run = row["Run_s"]
# Create script file.
scriptName = "getsra_" + run + ".sh"
script = open(scriptName, 'w')
if header:
util.writeHeader(script, config, "getsra")
script.write("wget" + " \\\n")
script.write("ftp://ftp.ncbi.nih.gov/sra/sra-instant/reads/ByRun/sra/SRR/" + os.path.join(run[0:6], run, run + ".sra") + " \\\n")
script.write("&> " + scriptName + ".log")
if (args.submitJobsToQueue.lower() == "yes") | (args.submitJobsToQueue.lower() == "y"):
subprocess.call("submitJobs.py", shell=True)
| gpl-3.0 |
billy-inn/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
nicolasmiller/pyculiarity | setup.py | 1 | 1256 | """
Usage details and source available here: https://github.com/nicolasmiller/pyculiarity.
The original R source and examples are available here: https://github.com/twitter/AnomalyDetection.
Copyright and License
Python port Copyright 2015 Nicolas Steven Miller
Original R source Copyright 2015 Twitter, Inc and other contributors
Licensed under the GPLv3
"""
from setuptools import setup, find_packages
setup(
name='pyculiarity',
version='0.0.7',
description='A Python port of Twitter\'s AnomalyDetection R Package.',
long_description=__doc__,
url='https://github.com/nicolasmiller/pyculiarity',
author='Nicolas Steven Miller',
author_email='[email protected]',
license='GPL',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 2.7',
],
keywords='data anomaly detection pandas timeseries',
packages=['pyculiarity'],
install_requires=['numpy', 'scipy', 'pandas', 'pytz',
'statsmodels', 'rstl'],
extras_require={
'test': ['nose', 'mock']
}
)
| gpl-3.0 |
ningchi/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 44 | 7663 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
"""Compute score for random uniform cluster labelings"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
"""Check that adjusted scores are almost zero on random labels"""
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
"""Compute the Adjusted Mutual Information and test against known values"""
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
"""Check numerical stability when information is exactly zero"""
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
"""Check relation between v_measure, entropy and mutual information"""
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
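# A small illustrative helper added for orientation (not from the original
# sklearn test module): the adjusted/normalized scores exercised above on
# one tiny imperfect labelling.
def _demo_supervised_cluster_metrics():
    labels_true = [0, 0, 0, 1, 1, 1]
    labels_pred = [0, 0, 1, 1, 2, 2]
    return {
        'ARI': adjusted_rand_score(labels_true, labels_pred),
        'AMI': adjusted_mutual_info_score(labels_true, labels_pred),
        'V-measure': v_measure_score(labels_true, labels_pred),
    }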
| bsd-3-clause |
tosanai/wbai_hackathon_2017 | agent/ml/vae.py | 1 | 5878 | import datetime
from threading import Thread, Lock
from keras import backend as K
from keras.models import clone_model, Model
from keras.layers import Input, Dense, Lambda
from keras.callbacks import TensorBoard
import tensorflow as tf
from config.model import TENSORBOARD_LOG_DIR
from config.model import VAE_MODEL
LOCK = Lock()
latent_dim = 3
epochs = 1
class VAE:
def __init__(self, x_shape, save_interval=100):
"""
Initialize VAE setting
:param x_shape: X shape(not x(i) shape)
"""
m, n = x_shape
hidden_unit_size = n >> 2
self.graph = tf.Graph()
with self.graph.as_default():
self.example = tf.placeholder(shape=(None, n), dtype=tf.float32)
self.queue = tf.FIFOQueue(capacity=20, dtypes=[tf.float32])
self.enqueue = self.queue.enqueue((self.example, ))
self.qr = tf.train.QueueRunner(self.queue, [self.enqueue] * 4)
self.coord = tf.train.Coordinator()
# x = Input(shape=(n, ), name='x')
x = Input(shape=(n, ), dtype=tf.float32, tensor=self.queue.dequeue(), name='x')
h1 = Dense(hidden_unit_size, activation='relu', dtype=tf.float32, name='h1')(x)
mean = Dense(latent_dim, name='mean')(h1)
var = Dense(latent_dim, name='var')(h1)
def sampling(args):
z_mean, z_var = args
epsilon = K.random_normal(shape=K.shape(z_var))
return z_mean + z_var * epsilon
# return z_mean + K.exp(z_var / 2) * epsilon
z = Lambda(sampling, name='z')([mean, var])
decoder_h1 = Dense(hidden_unit_size, activation='relu', name='decoder_h1')(z)
y = Dense(n, activation='sigmoid', name='y')(decoder_h1)
def loss(y_true, y_pred):
kld = (-1 / 2) * (K.sum(1 + K.log(K.square(var)) - K.square(mean) - K.square(var), axis=1))
# kld = (-1 / 2) * K.sum(1 + var - K.square(mean) - K.exp(var))
re = K.mean(K.sum(K.binary_crossentropy(y_true, y_pred), axis=1))
return K.mean(kld + re)
model = Model(inputs=x, outputs=y)
model.compile(optimizer='adam', loss=loss)
# using learn
self._model = model
# using predict without being affected by learning
self.model = clone_model(self._model)
self.y = y
e_x = Input(shape=(n, ), name='e_x')
e_h1 = Dense(hidden_unit_size, activation='relu', name='e_h1')(e_x)
e_mean = Dense(latent_dim, name='e_mean')(e_h1)
e_var = Dense(latent_dim, name='e_var')(e_h1)
e_z = Lambda(sampling, name='e_z')([e_mean, e_var])
self.encoder = Model(inputs=e_x, outputs=e_z)
z_input = Input(shape=(latent_dim,))
d_h1 = Dense(hidden_unit_size, activation='relu', name='d_h1')(z_input)
d_y = Dense(n, activation='sigmoid', name='d_y')(d_h1)
self.decoder = Model(inputs=z_input, outputs=d_y)
# self.a = tf.placeholder(dtype=tf.float32, shape=(None, 2))
# self.b = tf.placeholder(dtype=tf.float32, shape=(None, 2))
# self.ab = self.a + self.b
self.session = tf.Session(graph=self.graph)
K.set_session(self.session)
def learn(self, x_train, x_test=None):
if x_test is not None:
validation_data = (x_test, x_test)
else:
validation_data = None
enqueue_threads = self.qr.create_threads(self.session, coord=self.coord, start=True)
with LOCK:
for i in range(1):
self.session.run(self.enqueue, feed_dict={self.example: x_train})
self.coord.join(enqueue_threads)
# with tf.Session(graph=K.get_session().graph):
# self._model.fit(x=x_train, y=x_train, epochs=epochs, validation_data=validation_data,
# callbacks=[TensorBoard(log_dir=TENSORBOARD_LOG_DIR, histogram_freq=1)])
with LOCK:
w = self._model.get_weights()
self.model.set_weights(w)
self.encoder.set_weights(w[0:len(w) - 4])
self.decoder.set_weights(w[-4:])
self.model.save(VAE_MODEL + datetime.datetime.now().strftime("%Y%m%d%H%M%S") + '.h5')
def predict(self, x):
return self.decoder.predict(self.encoder.predict(x))
def encode(self, x):
# with K.get_session() as sess:
return self.encoder.predict(x)
def decode(self, z):
# with K.get_session() as sess:
return self.decoder.predict(z)
def _show_predict_image(self, x):
import matplotlib.pyplot as plt
import numpy as np
pred = self.predict(x)
plt.imshow(np.reshape(x[0], (28, 28)), cmap='Greys_r')
plt.show()
plt.imshow(np.reshape(pred[0], (28, 28)), cmap='Greys_r')
plt.show()
plt.imshow(np.reshape(x[5000], (28, 28)), cmap='Greys_r')
plt.show()
plt.imshow(np.reshape(pred[5000], (28, 28)), cmap='Greys_r')
plt.show()
def _main(args):
x_train, x_test = args
vae = VAE(x_shape=x_train.shape)
for _ in range(2):
thread = Thread(target=vae.learn, kwargs={'x_train': x_train, 'x_test': x_test})
thread.start()
# vae.learn(x_train, x_test)
# vae.learn(x_train, x_test)
# print(thread.is_alive())
# thread.join()
# print(thread.is_alive())
# vae._show_predict_image(x_test)
if __name__ == '__main__':
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
_main((x_train, x_test))
| apache-2.0 |
andrewv587/pycharm-project | keras_module/data_draw/draw_pascal.py | 1 | 2998 | import matplotlib.pylab as plt
import numpy as np
from ..image_utils import deprocess_image
def test_img(pascal_iter, index, pasacl_train):
datum, label = pascal_iter.get_example(index)
img = pasacl_train.visualize_example(index)
print(img.shape)
plt.figure()
plt.imshow(pasacl_train.datum_to_img(datum))
plt.show()
plt.figure()
plt.imshow(img)
plt.show()
print(datum.shape)
print(label.shape)
return
def draw_batch_images(generator, pasacl_train, batch_size=5):
my_gen_datas, my_gen_labels = generator.next()
plt.figure(figsize=(124, 124))
for index in range(batch_size):
datum = my_gen_datas[index]
label = my_gen_labels[index]
label = np.argmax(label, axis=-1)
img_pred = pasacl_train.visualize_pairs(datum, label)
plt.subplot(2, batch_size, index + 1)
plt.imshow(pasacl_train.datum_to_img(datum))
plt.subplot(2, batch_size, batch_size + index + 1)
plt.imshow(img_pred)
plt.show()
return
def draw_images_pair(img1_datas, img2_datas, index_pro=1, batch_size=5, is_save=True, prefix='st-',is_block=False):
plt.figure(figsize=(100, 40))
for index in range(batch_size):
datum = img1_datas[index].copy()
datum = deprocess_image(datum)
label = img2_datas[index].copy()
label = deprocess_image(label)
plt.subplot(2, batch_size, index + 1)
plt.imshow(datum)
plt.subplot(2, batch_size, batch_size + index + 1)
plt.imshow(label)
if is_save:
plt.savefig(prefix + str(index_pro) + '.jpg')
else:
plt.show(block=is_block)
return
def draw_batch_label(datas, label_pred, label_true, pasacl_train, batch_size=6):
plt.figure(figsize=(124, 124))
for inner_index in range(batch_size):
datum = datas[inner_index]
label_pred_datum = label_pred[inner_index]
label_pred_datum = np.argmax(label_pred_datum, axis=-1)
label_true_datum = label_true[inner_index]
label_true_datum = np.argmax(label_true_datum, axis=-1)
tmp_img_pred = pasacl_train.visualize_pairs(datum, label_pred_datum)
tmp_img_true = pasacl_train.visualize_pairs(datum, label_true_datum)
plt.subplot(2, batch_size, inner_index + 1)
plt.imshow(tmp_img_true)
plt.subplot(2, batch_size, batch_size + inner_index + 1)
plt.imshow(tmp_img_pred)
plt.show()
return
def draw_segment_pair(data_labels_pred, data_labels, batch_size=5):
plt.figure(figsize=(124, 124))
for index in range(batch_size):
label_pred = data_labels_pred[index]
label_pred = np.argmax(label_pred, axis=-1)
# label_pred += 1
label = data_labels[index]
label = np.argmax(label, axis=-1)
# label += 1
plt.subplot(2, batch_size, index + 1)
plt.imshow(label_pred)
plt.subplot(2, batch_size, batch_size + index + 1)
plt.imshow(label)
plt.show()
return
| apache-2.0 |
UltronAI/Deep-Learning | Pattern-Recognition/hw1-Linear-Classifier/scripts/svm/svm-20-2.py | 1 | 1329 | import numpy as np
import random
from sklearn import svm
trainDir = "../../traindata.txt"
testDir = "../../testdata.txt"
feature = [3, 4]
Xtrain = np.loadtxt(trainDir)[:, feature]
Ytrain = np.loadtxt(trainDir)[:, 10]
Xtest = np.loadtxt(testDir)[:, feature]
Ytest = np.loadtxt(testDir)[:, 10]
MIndex = np.where(Ytrain==1)[0]
FIndex = np.where(Ytrain==0)[0]
subN = 10
minErr = 1
mean = 0
for n in range(10):
MIndexSub = MIndex[np.array(random.sample(range(MIndex.shape[0]), subN))]
FIndexSub = FIndex[np.array(random.sample(range(FIndex.shape[0]), subN))]
indexSub = np.concatenate((MIndexSub, FIndexSub))
XtrainSub = Xtrain[indexSub]
YtrainSub = Ytrain[indexSub]
clf = svm.SVC(kernel='linear', C=3)
clf.fit(XtrainSub, YtrainSub)
Ypred = clf.predict(Xtest)
err = np.sum(np.abs(Ypred-Ytest))/Ytest.shape[0]
mean = mean + err
if minErr > err:
minErr = err
minMIndex = MIndexSub
minFIndex = FIndexSub
else:
pass
print('Mean Error Ratio:', mean/10) # 0.132926829268
print('Min Error Ratio:', minErr) # 0.0792682926829
print('Male Index:', minMIndex) # [756 770 496 666 519 540 506 635 818 867]
print('Female Index:', minFIndex) # [278 328 434 51 209 466 270 93 439 213] | mit |
abhisg/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
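# A minimal usage sketch added for illustration (not from the original
# sklearn source): estimate a bandwidth on a subsample before clustering.
def _demo_estimate_bandwidth():
    from sklearn.datasets import make_blobs
    X_demo, _ = make_blobs(n_samples=500, centers=3, random_state=0)
    # a 200-point subsample keeps the quadratic cost small
    return estimate_bandwidth(X_demo, quantile=0.2, n_samples=200,
                              random_state=0)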
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
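# A minimal usage sketch of the functional interface above, added for
# illustration (not from the original sklearn source).
def _demo_mean_shift():
    from sklearn.datasets import make_blobs
    X_demo, _ = make_blobs(n_samples=500, centers=3, cluster_std=0.6,
                           random_state=0)
    centers, labels = mean_shift(X_demo, bandwidth=2.0, bin_seeding=True)
    # with well-separated blobs this typically recovers about three centers
    return centers, labels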
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
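# A minimal usage sketch added for illustration (not from the original
# sklearn source): binning coarsens the seed set relative to using every
# point as a seed.
def _demo_get_bin_seeds():
    rng = np.random.RandomState(0)
    X_demo = rng.rand(200, 2)
    seeds = get_bin_seeds(X_demo, bin_size=0.25, min_bin_freq=2)
    # far fewer seeds than samples, each snapped to the 0.25-spaced grid
    return seeds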
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
        The number of jobs to use for the computation. This works by running
        the mean shift procedure for each seed in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
    labels_ : array, [n_samples,]
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
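    Examples
    --------
    A minimal usage sketch (the data below is illustrative; with two well
    separated groups of points and ``bandwidth=2`` two centers are found):
    >>> import numpy as np
    >>> from sklearn.cluster import MeanShift
    >>> X = np.array([[1., 1.], [2., 1.], [1., 0.],
    ...               [4., 7.], [3., 5.], [3., 6.]])
    >>> ms = MeanShift(bandwidth=2).fit(X)
    >>> ms.cluster_centers_.shape
    (2, 2)
    >>> ms.predict(np.array([[0., 0.], [5., 5.]])).shape
    (2,)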
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
        ----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
ScreamingUdder/mantid | qt/applications/workbench/workbench/plugins/jupyterconsole.py | 1 | 2648 | # This file is part of the mantid workbench.
#
# Copyright (C) 2017 mantidproject
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, unicode_literals)
# system imports
import sys
# third-party library imports
from mantidqt.widgets.jupyterconsole import InProcessJupyterConsole
try:
from IPython.core.usage import quick_guide
except ImportError: # quick_guide was removed in IPython 6.0
quick_guide = ''
from IPython.core.usage import release as ipy_release
from matplotlib import __version__ as mpl_version
from numpy.version import version as np_version
from qtpy.QtWidgets import QVBoxLayout
# local package imports
from workbench.plugins.base import PluginWidget
DEFAULT_BANNER_PARTS = [
'IPython {version} -- An enhanced Interactive Python.\n'.format(
version=ipy_release.version,
),
quick_guide,
'\nPython {}, numpy {}, matplotlib {}\n'.format(sys.version.split('\n')[0].strip(), np_version, mpl_version),
'Type "copyright", "credits" or "license" for more information.\n',
]
BANNER = ''.join(DEFAULT_BANNER_PARTS)
# should we share this with plugins.editor?
STARTUP_CODE = """from __future__ import (absolute_import, division, print_function, unicode_literals)
from mantid.simpleapi import *
import matplotlib.pyplot as plt
import numpy as np
"""
class JupyterConsole(PluginWidget):
"""Provides an in-process Jupyter Qt-based console"""
def __init__(self, parent):
super(JupyterConsole, self).__init__(parent)
# layout
self.console = InProcessJupyterConsole(self, banner=BANNER,
startup_code=STARTUP_CODE)
layout = QVBoxLayout()
layout.addWidget(self.console)
self.setLayout(layout)
# ----------------- Plugin API --------------------
def get_plugin_title(self):
return "IPython"
def read_user_settings(self, _):
pass
def register_plugin(self, menu=None):
self.main.add_dockwidget(self)
| gpl-3.0 |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/tests/test_colorbar.py | 6 | 11086 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from numpy import ma
import matplotlib
from matplotlib import rc_context
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.colors import BoundaryNorm
from matplotlib.cm import get_cmap
from matplotlib import cm
from matplotlib.colorbar import ColorbarBase
def _get_cmap_norms():
"""
Define a colormap and appropriate norms for each of the four
possible settings of the extend keyword.
Helper function for _colorbar_extension_shape and
colorbar_extension_length.
"""
# Create a color map and specify the levels it represents.
cmap = get_cmap("RdBu", lut=5)
clevs = [-5., -2.5, -.5, .5, 1.5, 3.5]
# Define norms for the color maps.
norms = dict()
norms['neither'] = BoundaryNorm(clevs, len(clevs) - 1)
norms['min'] = BoundaryNorm([-10] + clevs[1:], len(clevs) - 1)
norms['max'] = BoundaryNorm(clevs[:-1] + [10], len(clevs) - 1)
norms['both'] = BoundaryNorm([-10] + clevs[1:-1] + [10], len(clevs) - 1)
return cmap, norms
def _colorbar_extension_shape(spacing):
'''
Produce 4 colorbars with rectangular extensions for either uniform
or proportional spacing.
Helper function for test_colorbar_extension_shape.
'''
# Get a colormap and appropriate norms for each extension type.
cmap, norms = _get_cmap_norms()
# Create a figure and adjust whitespace for subplots.
fig = plt.figure()
fig.subplots_adjust(hspace=4)
for i, extension_type in enumerate(('neither', 'min', 'max', 'both')):
# Get the appropriate norm and use it to get colorbar boundaries.
norm = norms[extension_type]
boundaries = values = norm.boundaries
# Create a subplot.
cax = fig.add_subplot(4, 1, i + 1)
# Turn off text and ticks.
for item in cax.get_xticklabels() + cax.get_yticklabels() +\
cax.get_xticklines() + cax.get_yticklines():
item.set_visible(False)
# Generate the colorbar.
cb = ColorbarBase(cax, cmap=cmap, norm=norm,
boundaries=boundaries, values=values,
extend=extension_type, extendrect=True,
orientation='horizontal', spacing=spacing)
# Return the figure to the caller.
return fig
def _colorbar_extension_length(spacing):
'''
Produce 12 colorbars with variable length extensions for either
uniform or proportional spacing.
Helper function for test_colorbar_extension_length.
'''
# Get a colormap and appropriate norms for each extension type.
cmap, norms = _get_cmap_norms()
# Create a figure and adjust whitespace for subplots.
fig = plt.figure()
fig.subplots_adjust(hspace=.6)
for i, extension_type in enumerate(('neither', 'min', 'max', 'both')):
# Get the appropriate norm and use it to get colorbar boundaries.
norm = norms[extension_type]
boundaries = values = norm.boundaries
for j, extendfrac in enumerate((None, 'auto', 0.1)):
# Create a subplot.
cax = fig.add_subplot(12, 1, i*3 + j + 1)
# Turn off text and ticks.
for item in cax.get_xticklabels() + cax.get_yticklabels() +\
cax.get_xticklines() + cax.get_yticklines():
item.set_visible(False)
# Generate the colorbar.
cb = ColorbarBase(cax, cmap=cmap, norm=norm,
boundaries=boundaries, values=values,
extend=extension_type, extendfrac=extendfrac,
orientation='horizontal', spacing=spacing)
# Return the figure to the caller.
return fig
@image_comparison(
baseline_images=['colorbar_extensions_shape_uniform',
'colorbar_extensions_shape_proportional'],
extensions=['png'])
def test_colorbar_extension_shape():
'''Test rectangular colorbar extensions.'''
# Create figures for uniform and proportionally spaced colorbars.
fig1 = _colorbar_extension_shape('uniform')
fig2 = _colorbar_extension_shape('proportional')
@image_comparison(baseline_images=['colorbar_extensions_uniform',
'colorbar_extensions_proportional'],
extensions=['png'])
def test_colorbar_extension_length():
'''Test variable length colorbar extensions.'''
# Create figures for uniform and proportionally spaced colorbars.
fig1 = _colorbar_extension_length('uniform')
fig2 = _colorbar_extension_length('proportional')
@image_comparison(baseline_images=['cbar_with_orientation',
'cbar_locationing',
'double_cbar',
'cbar_sharing',
],
extensions=['png'], remove_text=True,
savefig_kwarg={'dpi': 40})
def test_colorbar_positioning():
data = np.arange(1200).reshape(30, 40)
levels = [0, 200, 400, 600, 800, 1000, 1200]
# -------------------
plt.figure()
plt.contourf(data, levels=levels)
plt.colorbar(orientation='horizontal', use_gridspec=False)
locations = ['left', 'right', 'top', 'bottom']
plt.figure()
for i, location in enumerate(locations):
plt.subplot(2, 2, i + 1)
plt.contourf(data, levels=levels)
plt.colorbar(location=location, use_gridspec=False)
# -------------------
plt.figure()
# make some other data (random integers)
data_2nd = np.array([[2, 3, 2, 3], [1.5, 2, 2, 3], [2, 3, 3, 4]])
# make the random data expand to the shape of the main data
data_2nd = np.repeat(np.repeat(data_2nd, 10, axis=1), 10, axis=0)
color_mappable = plt.contourf(data, levels=levels, extend='both')
# test extend frac here
hatch_mappable = plt.contourf(data_2nd, levels=[1, 2, 3], colors='none',
hatches=['/', 'o', '+'], extend='max')
plt.contour(hatch_mappable, colors='black')
plt.colorbar(color_mappable, location='left', label='variable 1',
use_gridspec=False)
plt.colorbar(hatch_mappable, location='right', label='variable 2',
use_gridspec=False)
# -------------------
plt.figure()
ax1 = plt.subplot(211, anchor='NE', aspect='equal')
plt.contourf(data, levels=levels)
ax2 = plt.subplot(223)
plt.contourf(data, levels=levels)
ax3 = plt.subplot(224)
plt.contourf(data, levels=levels)
plt.colorbar(ax=[ax2, ax3, ax1], location='right', pad=0.0, shrink=0.5,
panchor=False, use_gridspec=False)
plt.colorbar(ax=[ax2, ax3, ax1], location='left', shrink=0.5,
panchor=False, use_gridspec=False)
plt.colorbar(ax=[ax1], location='bottom', panchor=False,
anchor=(0.8, 0.5), shrink=0.6, use_gridspec=False)
@image_comparison(baseline_images=['cbar_with_subplots_adjust'],
extensions=['png'], remove_text=True,
savefig_kwarg={'dpi': 40})
def test_gridspec_make_colorbar():
plt.figure()
data = np.arange(1200).reshape(30, 40)
levels = [0, 200, 400, 600, 800, 1000, 1200]
plt.subplot(121)
plt.contourf(data, levels=levels)
plt.colorbar(use_gridspec=True, orientation='vertical')
plt.subplot(122)
plt.contourf(data, levels=levels)
plt.colorbar(use_gridspec=True, orientation='horizontal')
plt.subplots_adjust(top=0.95, right=0.95, bottom=0.2, hspace=0.25)
@image_comparison(baseline_images=['colorbar_single_scatter'],
extensions=['png'], remove_text=True,
savefig_kwarg={'dpi': 40})
def test_colorbar_single_scatter():
# Issue #2642: if a path collection has only one entry,
# the norm scaling within the colorbar must ensure a
# finite range, otherwise a zero denominator will occur in _locate.
plt.figure()
x = np.arange(4)
y = x.copy()
z = np.ma.masked_greater(np.arange(50, 54), 50)
cmap = plt.get_cmap('jet', 16)
cs = plt.scatter(x, y, z, c=z, cmap=cmap)
plt.colorbar(cs)
def _test_remove_from_figure(use_gridspec):
"""
Test `remove_from_figure` with the specified ``use_gridspec`` setting
"""
fig = plt.figure()
ax = fig.add_subplot(111)
sc = ax.scatter([1, 2], [3, 4], cmap="spring")
sc.set_array(np.array([5, 6]))
pre_figbox = np.array(ax.figbox)
cb = fig.colorbar(sc, use_gridspec=use_gridspec)
fig.subplots_adjust()
cb.remove()
fig.subplots_adjust()
post_figbox = np.array(ax.figbox)
assert (pre_figbox == post_figbox).all()
@cleanup
def test_remove_from_figure_with_gridspec():
"""
Make sure that `remove_from_figure` removes the colorbar and properly
restores the gridspec
"""
_test_remove_from_figure(True)
@cleanup
def test_remove_from_figure_no_gridspec():
"""
Make sure that `remove_from_figure` removes a colorbar that was created
without modifying the gridspec
"""
_test_remove_from_figure(False)
@cleanup
def test_colorbarbase():
# smoke test from #3805
ax = plt.gca()
ColorbarBase(ax, plt.cm.bone)
@image_comparison(
baseline_images=['colorbar_closed_patch'],
remove_text=True)
def test_colorbar_closed_patch():
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_axes([0.05, 0.85, 0.9, 0.1])
ax2 = fig.add_axes([0.1, 0.65, 0.75, 0.1])
ax3 = fig.add_axes([0.05, 0.45, 0.9, 0.1])
ax4 = fig.add_axes([0.05, 0.25, 0.9, 0.1])
ax5 = fig.add_axes([0.05, 0.05, 0.9, 0.1])
cmap = get_cmap("RdBu", lut=5)
im = ax1.pcolormesh(np.linspace(0, 10, 16).reshape((4, 4)), cmap=cmap)
values = np.linspace(0, 10, 5)
with rc_context({'axes.linewidth': 16}):
plt.colorbar(im, cax=ax2, cmap=cmap, orientation='horizontal',
extend='both', extendfrac=0.5, values=values)
plt.colorbar(im, cax=ax3, cmap=cmap, orientation='horizontal',
extend='both', values=values)
plt.colorbar(im, cax=ax4, cmap=cmap, orientation='horizontal',
extend='both', extendrect=True, values=values)
plt.colorbar(im, cax=ax5, cmap=cmap, orientation='horizontal',
extend='neither', values=values)
@cleanup
def test_colorbar_ticks():
# test fix for #5673
fig, ax = plt.subplots()
x = np.arange(-3.0, 4.001)
y = np.arange(-4.0, 3.001)
X, Y = np.meshgrid(x, y)
Z = X * Y
clevs = np.array([-12, -5, 0, 5, 12], dtype=float)
colors = ['r', 'g', 'b', 'c']
cs = ax.contourf(X, Y, Z, clevs, colors=colors)
cbar = fig.colorbar(cs, ax=ax, extend='neither',
orientation='horizontal', ticks=clevs)
assert len(cbar.ax.xaxis.get_ticklocs()) == len(clevs)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
thientu/scikit-learn | sklearn/neighbors/approximate.py | 71 | 22357 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
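    Examples
    --------
    A small sketch using full-width masks, which amounts to an exact-match
    query (the tree values below are arbitrary):
    >>> import numpy as np
    >>> tree = np.array([2, 5, 5, 9], dtype='>u4')
    >>> lo, hi = _find_matching_indices(tree, np.array([5], dtype='>u4'),
    ...                                 np.uint32(2 ** 32 - 1), np.uint32(0))
    >>> int(lo[0]), int(hi[0])
    (1, 3)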
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
        Lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
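        Examples
        --------
        A hedged sketch (random data; only the shapes of the fitted
        structures are checked, since the hashes depend on the random state):
        >>> import numpy as np
        >>> X = np.random.rand(100, 10)
        >>> lshf = LSHForest(n_estimators=4).fit(X)
        >>> len(lshf.trees_), lshf.trees_[0].shape
        (4, (100,))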
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # n_components = hash size, n_features = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
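        Examples
        --------
        A hedged sketch (random data; the actual neighbors depend on the
        random hashes, so only the output shapes are shown):
        >>> import numpy as np
        >>> X = np.random.rand(100, 5)
        >>> lshf = LSHForest(random_state=42).fit(X)
        >>> dist, ind = lshf.radius_neighbors(X[:1], radius=0.5)
        >>> dist.shape, ind.shape
        ((1,), (1,))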
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
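        Examples
        --------
        A hedged sketch of incremental indexing (random data; only the size
        of the stored index is checked):
        >>> import numpy as np
        >>> lshf = LSHForest().fit(np.random.rand(50, 10))
        >>> lshf = lshf.partial_fit(np.random.rand(10, 10))
        >>> lshf._fit_X.shape
        (60, 10)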
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |
12AngryMen/votca-scripts | xtp/xtp_energielevels.py | 2 | 3385 | #!/usr/bin/env python
import sys
import numpy as np
import matplotlib.pyplot as plt
if len(sys.argv)==2:
infile=sys.argv[1]
export=False
elif len(sys.argv)==3:
infile=sys.argv[1]
gnufile=sys.argv[2]
export=True
else:
print "Wrong number of arguments simply specify first the profile.dat file and then optionally a file for output.Exiting"
sys.exit()
z=[]
EA=[]
IP=[]
dEA=[]
dIP=[]
with open (infile,"r") as f:
for line in f:
if "#" not in line:
lineparts=line.split()
IPblocked=False
dIPblocked=False
EAblocked=False
dEAblocked=False
#print lineparts
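            # After the z column (i == 0), the data columns repeat in the
            # pattern IP, EA, dIP, dEA; the *blocked flags below keep at most
            # one value of each per row.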
for i in range(len(lineparts)):
if lineparts[i]!='-nan' and i>0:
#print i%4,i,lineparts[i],lineparts[0],line
if i%4==1:
if not IPblocked:
IP.append(float(lineparts[i]))
IPblocked=True
else:
print "Two elements at same position"
elif i%4==3:
if not dIPblocked:
dIP.append(float(lineparts[i]))
dIPblocked=True
elif i%4==2:
if not EAblocked:
EA.append(float(lineparts[i]))
EAblocked=True
elif i%4==0:
if not dEAblocked:
dEA.append(float(lineparts[i]))
dEAblocked=True
else:
print i
if IPblocked+dIPblocked+EAblocked+dEAblocked!=0:
z.append(float(lineparts[0]))
profile=np.array([z,IP,dIP,EA,dEA])
plt.errorbar(profile[0],profile[1],profile[2],marker="o")
plt.errorbar(profile[0],-profile[3],profile[4],marker="x")
plt.axis('tight')
plt.show()
if export==True:
np.savetxt(gnufile, profile.T, delimiter="\t")
| apache-2.0 |
kiyoto/statsmodels | statsmodels/examples/ex_multivar_kde.py | 34 | 1504 |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
import statsmodels.api as sm
"""
This example illustrates the nonparametric estimation of a
bivariate bi-modal distribution that is a mixture of two normal
distributions.
author: George Panterov
"""
if __name__ == '__main__':
np.random.seed(123456)
# generate the data
nobs = 500
BW = 'cv_ml'
mu1 = [3, 4]
mu2 = [6, 1]
cov1 = np.asarray([[1, 0.7], [0.7, 1]])
cov2 = np.asarray([[1, -0.7], [-0.7, 1]])
ix = np.random.uniform(size=nobs) > 0.5
V = np.random.multivariate_normal(mu1, cov1, size=nobs)
V[ix, :] = np.random.multivariate_normal(mu2, cov2, size=nobs)[ix, :]
x = V[:, 0]
y = V[:, 1]
dens = sm.nonparametric.KDEMultivariate(data=[x, y], var_type='cc', bw=BW,
defaults=sm.nonparametric.EstimatorSettings(efficient=True))
supportx = np.linspace(min(x), max(x), 60)
supporty = np.linspace(min(y), max(y), 60)
X, Y = np.meshgrid(supportx, supporty)
edat = np.column_stack([X.ravel(), Y.ravel()])
Z = dens.pdf(edat).reshape(X.shape)
# plot
fig = plt.figure(1)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet,
linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.figure(2)
plt.imshow(Z)
plt.show()
| bsd-3-clause |
fabiopetroni/Dato-Core | src/unity/python/graphlab/data_structures/sarray.py | 13 | 91593 | """
This module defines the SArray class which provides the
ability to create, access and manipulate a remote scalable array object.
SArray acts similarly to pandas.Series but without indexing.
The data is immutable, homogeneous, and is stored on the GraphLab Server side.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import graphlab.connect as _mt
import graphlab.connect.main as glconnect
from graphlab.cython.cy_type_utils import pytype_from_dtype, infer_type_of_list, is_numeric_type
from graphlab.cython.cy_sarray import UnitySArrayProxy
from graphlab.cython.context import debug_trace as cython_context
from graphlab.util import _make_internal_url, _is_callable
import graphlab as gl
import inspect
import math
from graphlab.deps import numpy, HAS_NUMPY
from graphlab.deps import pandas, HAS_PANDAS
import time
import array
import datetime
import graphlab.meta as meta
import itertools
import warnings
__all__ = ['SArray']
def _create_sequential_sarray(size, start=0, reverse=False):
if type(size) is not int:
raise TypeError("size must be int")
if type(start) is not int:
raise TypeError("size must be int")
if type(reverse) is not bool:
raise TypeError("reverse must me bool")
with cython_context():
return SArray(_proxy=glconnect.get_unity().create_sequential_sarray(size, start, reverse))
class SArray(object):
"""
An immutable, homogeneously typed array object backed by persistent storage.
SArray is scaled to hold data that are much larger than the machine's main
memory. It fully supports missing values and random access. The
data backing an SArray is located on the same machine as the GraphLab
Server process. Each column in an :py:class:`~graphlab.SFrame` is an
SArray.
Parameters
----------
data : list | numpy.ndarray | pandas.Series | string
The input data. If this is a list, numpy.ndarray, or pandas.Series,
the data in the list is converted and stored in an SArray.
Alternatively if this is a string, it is interpreted as a path (or
url) to a text file. Each line of the text file is loaded as a
separate row. If ``data`` is a directory where an SArray was previously
saved, this is loaded as an SArray read directly out of that
directory.
dtype : {None, int, float, str, list, array.array, dict, datetime.datetime, graphlab.Image}, optional
The data type of the SArray. If not specified (None), we attempt to
infer it from the input. If it is a numpy array or a Pandas series, the
dtype of the array/series is used. If it is a list, the dtype is
inferred from the inner list. If it is a URL or path to a text file, we
default the dtype to str.
ignore_cast_failure : bool, optional
If True, ignores casting failures but warns when elements cannot be
casted into the specified dtype.
Notes
-----
- If ``data`` is pandas.Series, the index will be ignored.
- The datetime is based on the Boost datetime format (see http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html
for details)
- When working with the GraphLab EC2 instance (see
:py:func:`graphlab.aws.launch_EC2()`), an SArray cannot be constructed
using local file path, because it involves a potentially large amount of
data transfer from client to server. However, it is still okay to use a
remote file path. See the examples below. The same restriction applies to
:py:class:`~graphlab.SGraph` and :py:class:`~graphlab.SFrame`.
Examples
--------
SArray can be constructed in various ways:
Construct an SArray from list.
>>> from graphlab import SArray
>>> sa = SArray(data=[1,2,3,4,5], dtype=int)
Construct an SArray from numpy.ndarray.
>>> sa = SArray(data=numpy.asarray([1,2,3,4,5]), dtype=int)
or:
>>> sa = SArray(numpy.asarray([1,2,3,4,5]), int)
Construct an SArray from pandas.Series.
>>> sa = SArray(data=pd.Series([1,2,3,4,5]), dtype=int)
or:
>>> sa = SArray(pd.Series([1,2,3,4,5]), int)
If the type is not specified, automatic inference is attempted:
>>> SArray(data=[1,2,3,4,5]).dtype()
int
>>> SArray(data=[1,2,3,4,5.0]).dtype()
float
The SArray supports standard datatypes such as: integer, float and string.
It also supports three higher level datatypes: float arrays, dict
and list (array of arbitrary types).
Create an SArray from a list of strings:
>>> sa = SArray(data=['a','b'])
Create an SArray from a list of float arrays;
>>> sa = SArray([[1,2,3], [3,4,5]])
Create an SArray from a list of lists:
>>> sa = SArray(data=[['a', 1, {'work': 3}], [2, 2.0]])
Create an SArray from a list of dictionaries:
>>> sa = SArray(data=[{'a':1, 'b': 2}, {'b':2, 'c': 1}])
Create an SArray from a list of datetime objects:
>>> sa = SArray(data=[datetime.datetime(2011, 10, 20, 9, 30, 10)])
Construct an SArray from local text file. (Only works for local server).
>>> sa = SArray('/tmp/a_to_z.txt.gz')
Construct an SArray from a text file downloaded from a URL.
>>> sa = SArray('http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz')
**Numeric Operators**
SArrays support a large number of vectorized operations on numeric types.
For instance:
>>> sa = SArray([1,1,1,1,1])
>>> sb = SArray([2,2,2,2,2])
>>> sc = sa + sb
>>> sc
dtype: int
Rows: 5
[3, 3, 3, 3, 3]
>>> sc + 2
dtype: int
Rows: 5
[5, 5, 5, 5, 5]
Operators which are supported include all numeric operators (+,-,*,/), as
well as comparison operators (>, >=, <, <=), and logical operators (&, |).
For instance:
>>> sa = SArray([1,2,3,4,5])
>>> (sa >= 2) & (sa <= 4)
dtype: int
Rows: 5
[0, 1, 1, 1, 0]
The numeric operators (+,-,*,/) also work on array types:
>>> sa = SArray(data=[[1.0,1.0], [2.0,2.0]])
>>> sa + 1
dtype: list
Rows: 2
[array('f', [2.0, 2.0]), array('f', [3.0, 3.0])]
>>> sa + sa
dtype: list
Rows: 2
[array('f', [2.0, 2.0]), array('f', [4.0, 4.0])]
The addition operator (+) can also be used for string concatenation:
>>> sa = SArray(data=['a','b'])
>>> sa + "x"
dtype: str
Rows: 2
['ax', 'bx']
This can be useful for performing type interpretation of lists or
dictionaries stored as strings:
>>> sa = SArray(data=['a,b','c,d'])
>>> ("[" + sa + "]").astype(list) # adding brackets make it look like a list
dtype: list
Rows: 2
[['a', 'b'], ['c', 'd']]
All comparison operations and boolean operators are supported and emit
binary SArrays.
>>> sa = SArray([1,2,3,4,5])
>>> sa >= 2
dtype: int
    Rows: 5
[0, 1, 1, 1, 1]
>>> (sa >= 2) & (sa <= 4)
dtype: int
    Rows: 5
[0, 1, 1, 1, 0]
**Element Access and Slicing**
SArrays can be accessed by integer keys just like a regular python list.
Such operations may not be fast on large datasets so looping over an SArray
should be avoided.
>>> sa = SArray([1,2,3,4,5])
>>> sa[0]
1
>>> sa[2]
3
>>> sa[5]
IndexError: SFrame index out of range
Negative indices can be used to access elements from the tail of the array
>>> sa[-1] # returns the last element
5
>>> sa[-2] # returns the second to last element
4
The SArray also supports the full range of python slicing operators:
>>> sa[1000:] # Returns an SArray containing rows 1000 to the end
>>> sa[:1000] # Returns an SArray containing rows 0 to row 999 inclusive
>>> sa[0:1000:2] # Returns an SArray containing rows 0 to row 1000 in steps of 2
>>> sa[-100:] # Returns an SArray containing last 100 rows
>>> sa[-100:len(sa):2] # Returns an SArray containing last 100 rows in steps of 2
**Logical Filter**
An SArray can be filtered using
>>> array[binary_filter]
where array and binary_filter are SArrays of the same length. The result is
a new SArray which contains only elements of 'array' where its matching row
in the binary_filter is non zero.
This permits the use of boolean operators that can be used to perform
logical filtering operations. For instance:
>>> sa = SArray([1,2,3,4,5])
>>> sa[(sa >= 2) & (sa <= 4)]
dtype: int
Rows: 3
[2, 3, 4]
This can also be used more generally to provide filtering capability which
is otherwise not expressible with simple boolean functions. For instance:
>>> sa = SArray([1,2,3,4,5])
>>> sa[sa.apply(lambda x: math.log(x) <= 1)]
dtype: int
    Rows: 2
[1, 2]
This is equivalent to
>>> sa.filter(lambda x: math.log(x) <= 1)
dtype: int
    Rows: 2
[1, 2]
**Iteration**
The SArray is also iterable, but not efficiently since this involves a
streaming transmission of data from the server to the client. This should
not be used for large data.
>>> sa = SArray([1,2,3,4,5])
>>> [i + 1 for i in sa]
[2, 3, 4, 5, 6]
This can be used to convert an SArray to a list:
>>> sa = SArray([1,2,3,4,5])
>>> l = list(sa)
>>> l
[1, 2, 3, 4, 5]
"""
def __init__(self, data=[], dtype=None, ignore_cast_failure=False, _proxy=None):
"""
__init__(data=list(), dtype=None, ignore_cast_failure=False)
Construct a new SArray. The source of data includes: list,
numpy.ndarray, pandas.Series, and urls.
"""
_mt._get_metric_tracker().track('sarray.init')
if dtype is not None and type(dtype) != type:
raise TypeError('dtype must be a type, e.g. use int rather than \'int\'')
if (_proxy):
self.__proxy__ = _proxy
elif type(data) == SArray:
self.__proxy__ = data.__proxy__
else:
self.__proxy__ = UnitySArrayProxy(glconnect.get_client())
# we need to perform type inference
if dtype is None:
if (isinstance(data, list)):
# if it is a list, Get the first type and make sure
# the remaining items are all of the same type
dtype = infer_type_of_list(data)
elif isinstance(data, array.array):
dtype = infer_type_of_list(data)
elif HAS_PANDAS and isinstance(data, pandas.Series):
# if it is a pandas series get the dtype of the series
dtype = pytype_from_dtype(data.dtype)
if dtype == object:
# we need to get a bit more fine grained than that
dtype = infer_type_of_list(data)
elif HAS_NUMPY and isinstance(data, numpy.ndarray):
# if it is a numpy array, get the dtype of the array
dtype = pytype_from_dtype(data.dtype)
if dtype == object:
# we need to get a bit more fine grained than that
dtype = infer_type_of_list(data)
if len(data.shape) == 2:
# we need to make it an array or a list
if dtype == float or dtype == int:
dtype = array.array
else:
dtype = list
elif len(data.shape) > 2:
raise TypeError("Cannot convert Numpy arrays of greater than 2 dimensions")
elif (isinstance(data, str) or isinstance(data, unicode)):
# if it is a file, we default to string
dtype = str
if HAS_PANDAS and isinstance(data, pandas.Series):
with cython_context():
self.__proxy__.load_from_iterable(data.values, dtype, ignore_cast_failure)
elif (HAS_NUMPY and isinstance(data, numpy.ndarray)) or isinstance(data, list) or isinstance(data, array.array):
with cython_context():
self.__proxy__.load_from_iterable(data, dtype, ignore_cast_failure)
elif (isinstance(data, str) or isinstance(data, unicode)):
internal_url = _make_internal_url(data)
with cython_context():
self.__proxy__.load_autodetect(internal_url, dtype)
else:
raise TypeError("Unexpected data source. " \
"Possible data source types are: list, " \
"numpy.ndarray, pandas.Series, and string(url)")
@classmethod
def from_const(cls, value, size):
"""
Constructs an SArray of size with a const value.
Parameters
----------
value : [int | float | str | array.array | list | dict | datetime]
The value to fill the SArray
size : int
The size of the SArray
Examples
--------
Construct an SArray consisting of 10 zeroes:
>>> graphlab.SArray.from_const(0, 10)
"""
assert type(size) is int and size >= 0, "size must be a positive int"
if (type(value) not in [type(None), int, float, str, array.array, list, dict, datetime.datetime]):
raise TypeError('Cannot create sarray of value type %s' % str(type(value)))
proxy = UnitySArrayProxy(glconnect.get_client())
proxy.load_from_const(value, size)
return cls(_proxy=proxy)
@classmethod
def from_sequence(cls, *args):
"""
from_sequence(start=0, stop)
Create an SArray from sequence
.. sourcecode:: python
Construct an SArray of integer values from 0 to 999
>>> gl.SArray.from_sequence(1000)
This is equivalent, but more efficient than:
>>> gl.SArray(range(1000))
Construct an SArray of integer values from 10 to 999
>>> gl.SArray.from_sequence(10, 1000)
This is equivalent, but more efficient than:
>>> gl.SArray(range(10, 1000))
Parameters
----------
start : int, optional
The start of the sequence. The sequence will contain this value.
stop : int
The end of the sequence. The sequence will not contain this value.
"""
start = None
stop = None
# fill with args. This checks for from_sequence(100), from_sequence(10,100)
if len(args) == 1:
stop = args[0]
elif len(args) == 2:
start = args[0]
stop = args[1]
if stop is None and start is None:
raise TypeError("from_sequence expects at least 1 argument. got 0")
elif start is None:
return _create_sequential_sarray(stop)
else:
size = stop - start
# this matches the behavior of range
# i.e. range(100,10) just returns an empty array
if (size < 0):
size = 0
return _create_sequential_sarray(size, start)
@classmethod
def from_avro(cls, filename):
"""
Construct an SArray from an Avro file. The SArray type is determined by
the schema of the Avro file.
Parameters
----------
filename : str
The Avro file to load into an SArray.
Examples
--------
Construct an SArray from a local Avro file named 'data.avro':
>>> graphlab.SArray.from_avro('/data/data.avro')
Notes
-----
Currently only supports direct loading of files on the local filesystem.
References
----------
- `Avro Specification <http://avro.apache.org/docs/1.7.7/spec.html>`_
"""
_mt._get_metric_tracker().track('sarray.from_avro')
proxy = UnitySArrayProxy(glconnect.get_client())
proxy.load_from_avro(filename)
return cls(_proxy = proxy)
def __get_content_identifier__(self):
"""
Returns the unique identifier of the content that backs the SArray
Notes
-----
Meant for internal use only.
"""
with cython_context():
return self.__proxy__.get_content_identifier()
def save(self, filename, format=None):
"""
Saves the SArray to file.
The saved SArray will be in a directory named with the `targetfile`
parameter.
Parameters
----------
filename : string
A local path or a remote URL. If format is 'text', it will be
saved as a text file. If format is 'binary', a directory will be
created at the location which will contain the SArray.
format : {'binary', 'text', 'csv'}, optional
Format in which to save the SFrame. Binary saved SArrays can be
loaded much faster and without any format conversion losses.
'text' and 'csv' are synonymous: Each SArray row will be written
as a single line in an output text file. If not
given, will try to infer the format from filename given. If file
name ends with 'csv', 'txt' or '.csv.gz', then save as 'csv' format,
otherwise save as 'binary' format.
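        Examples
        --------
        A minimal sketch (the paths below are placeholders):
        >>> sa = graphlab.SArray(range(100))
        >>> sa.save('/tmp/sarray_binary')       # binary format
        >>> sa.save('/tmp/sarray_text.csv')     # one row per line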
"""
if format == None:
if filename.endswith(('.csv', '.csv.gz', 'txt')):
format = 'text'
else:
format = 'binary'
if format == 'binary':
with cython_context():
self.__proxy__.save(_make_internal_url(filename))
elif format == 'text':
sf = gl.SFrame({'X1':self})
with cython_context():
sf.__proxy__.save_as_csv(_make_internal_url(filename), {'header':False})
def _escape_space(self,s):
return "".join([ch.encode('string_escape') if ch.isspace() else ch for ch in s])
def __repr__(self):
"""
Returns a string description of the SArray.
"""
ret = "dtype: " + str(self.dtype().__name__) + "\n"
ret = ret + "Rows: " + str(self.size()) + "\n"
ret = ret + self.__str__()
return ret
def __str__(self):
"""
Returns a string containing the first 100 elements of the array.
"""
# If sarray is image, take head of elements casted to string.
if self.dtype() == gl.data_structures.image.Image:
headln = str(list(self._head_str(100)))
else:
headln = self._escape_space(str(list(self.head(100))))
headln = unicode(headln.decode('string_escape'),'utf-8',errors='replace').encode('utf-8')
if (self.size() > 100):
# cut the last close bracket
# and replace it with ...
headln = headln[0:-1] + ", ... ]"
return headln
def __nonzero__(self):
"""
Returns true if the array is not empty.
"""
return self.size() != 0
def __len__(self):
"""
Returns the length of the array
"""
return self.size()
def __iter__(self):
"""
Provides an iterator to the contents of the array.
"""
def generator():
elems_at_a_time = 262144
self.__proxy__.begin_iterator()
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
while(True):
for j in ret:
yield j
if len(ret) == elems_at_a_time:
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
else:
break
return generator()
def __add__(self, other):
"""
If other is a scalar value, adds it to the current array, returning
the new result. If other is an SArray, performs an element-wise
addition of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '+'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '+'))
def __sub__(self, other):
"""
If other is a scalar value, subtracts it from the current array, returning
the new result. If other is an SArray, performs an element-wise
subtraction of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '-'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '-'))
def __mul__(self, other):
"""
If other is a scalar value, multiplies it to the current array, returning
the new result. If other is an SArray, performs an element-wise
multiplication of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '*'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '*'))
def __div__(self, other):
"""
If other is a scalar value, divides each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise division of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '/'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '/'))
def __lt__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '<'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '<'))
def __gt__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '>'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '>'))
def __le__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '<='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '<='))
def __ge__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '>='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '>='))
def __radd__(self, other):
"""
Adds a scalar value to the current array.
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '+'))
def __rsub__(self, other):
"""
Subtracts a scalar value from the current array.
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '-'))
def __rmul__(self, other):
"""
Multiplies a scalar value to the current array.
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '*'))
def __rdiv__(self, other):
"""
Divides a scalar value by each element in the array
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '/'))
def __eq__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the new result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '=='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '=='))
def __ne__(self, other):
"""
If other is a scalar value, compares each element of the current array
        against the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '!='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '!='))
def __and__(self, other):
"""
Perform a logical element-wise 'and' against another SArray.
"""
if type(other) is SArray:
with cython_context():
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '&'))
else:
raise TypeError("SArray can only perform logical and against another SArray")
def __or__(self, other):
"""
Perform a logical element-wise 'or' against another SArray.
"""
if type(other) is SArray:
with cython_context():
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '|'))
else:
raise TypeError("SArray can only perform logical or against another SArray")
def __getitem__(self, other):
"""
If the key is an SArray of identical length, this function performs a
logical filter: i.e. it subselects all the elements in this array
where the corresponding value in the other array evaluates to true.
If the key is an integer this returns a single row of
the SArray. If the key is a slice, this returns an SArray with the
sliced rows. See the GraphLab Create User Guide for usage examples.
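        Examples
        --------
        A minimal illustrative sketch (outputs follow the behavior described
        above):
        >>> sa = graphlab.SArray([1, 2, 3, 4, 5])
        >>> sa[1]
        2
        >>> sa[sa > 2]  # logical filter
        dtype: int
        Rows: 3
        [3, 4, 5]
        >>> sa[1:3]  # slice
        dtype: int
        Rows: 2
        [2, 3]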
"""
sa_len = len(self)
if type(other) is int:
if other < 0:
other += sa_len
if other >= sa_len:
                raise IndexError("SArray index out of range")
try:
lb, ub, value_list = self._getitem_cache
if lb <= other < ub:
return value_list[other - lb]
except AttributeError:
pass
# Not in cache, need to grab it
block_size = 1024 * (32 if self.dtype() in [int, long, float] else 4)
block_num = int(other // block_size)
lb = block_num * block_size
ub = min(sa_len, lb + block_size)
val_list = list(SArray(_proxy = self.__proxy__.copy_range(lb, 1, ub)))
self._getitem_cache = (lb, ub, val_list)
return val_list[other - lb]
elif type(other) is SArray:
if len(other) != sa_len:
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SArray(_proxy = self.__proxy__.logical_filter(other.__proxy__))
elif type(other) is slice:
start = other.start
stop = other.stop
step = other.step
if start is None:
start = 0
if stop is None:
stop = sa_len
if step is None:
step = 1
# handle negative indices
if start < 0:
start = sa_len + start
if stop < 0:
stop = sa_len + stop
return SArray(_proxy = self.__proxy__.copy_range(start, step, stop))
else:
raise IndexError("Invalid type to use for indexing")
def __materialize__(self):
"""
For a SArray that is lazily evaluated, force persist this sarray
to disk, committing all lazy evaluated operations.
"""
with cython_context():
self.__proxy__.materialize()
def __is_materialized__(self):
"""
Returns whether or not the sarray has been materialized.
"""
return self.__proxy__.is_materialized()
def size(self):
"""
The size of the SArray.
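        Returns
        -------
        out : int
            The number of elements in the SArray.
        Examples
        --------
        Illustrative example:
        >>> graphlab.SArray([1, 2, 3]).size()
        3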
"""
return self.__proxy__.size()
def dtype(self):
"""
The data type of the SArray.
Returns
-------
out : type
The type of the SArray.
Examples
--------
>>> sa = gl.SArray(["The quick brown fox jumps over the lazy dog."])
>>> sa.dtype()
str
>>> sa = gl.SArray(range(10))
>>> sa.dtype()
int
"""
return self.__proxy__.dtype()
def head(self, n=10):
"""
Returns an SArray which contains the first n rows of this SArray.
Parameters
----------
n : int
The number of rows to fetch.
Returns
-------
out : SArray
A new SArray which contains the first n rows of the current SArray.
Examples
--------
>>> gl.SArray(range(10)).head(5)
dtype: int
Rows: 5
[0, 1, 2, 3, 4]
"""
return SArray(_proxy=self.__proxy__.head(n))
def vector_slice(self, start, end=None):
"""
If this SArray contains vectors or recursive types, this returns a new SArray
containing each individual vector sliced, between start and end, exclusive.
Parameters
----------
start : int
The start position of the slice.
end : int, optional.
The end position of the slice. Note that the end position
is NOT included in the slice. Thus a g.vector_slice(1,3) will extract
entries in position 1 and 2.
Returns
-------
out : SArray
Each individual vector sliced according to the arguments.
Examples
--------
If g is a vector of floats:
>>> g = SArray([[1,2,3],[2,3,4]])
>>> g
dtype: array
Rows: 2
[array('d', [1.0, 2.0, 3.0]), array('d', [2.0, 3.0, 4.0])]
>>> g.vector_slice(0) # extracts the first element of each vector
dtype: float
Rows: 2
[1.0, 2.0]
>>> g.vector_slice(0, 2) # extracts the first two elements of each vector
dtype: array.array
Rows: 2
[array('d', [1.0, 2.0]), array('d', [2.0, 3.0])]
If a vector cannot be sliced, the result will be None:
>>> g = SArray([[1],[1,2],[1,2,3]])
>>> g
dtype: array.array
Rows: 3
[array('d', [1.0]), array('d', [1.0, 2.0]), array('d', [1.0, 2.0, 3.0])]
>>> g.vector_slice(2)
dtype: float
Rows: 3
[None, None, 3.0]
>>> g.vector_slice(0,2)
dtype: list
Rows: 3
[None, array('d', [1.0, 2.0]), array('d', [1.0, 2.0])]
If g is a vector of mixed types (float, int, str, array, list, etc.):
>>> g = SArray([['a',1,1.0],['b',2,2.0]])
>>> g
dtype: list
Rows: 2
[['a', 1, 1.0], ['b', 2, 2.0]]
>>> g.vector_slice(0) # extracts the first element of each vector
dtype: list
Rows: 2
[['a'], ['b']]
"""
if (self.dtype() != array.array) and (self.dtype() != list):
raise RuntimeError("Only Vector type can be sliced")
if end == None:
end = start + 1
with cython_context():
return SArray(_proxy=self.__proxy__.vector_slice(start, end))
def _count_words(self, to_lower=True):
"""
For documentation, see graphlab.text_analytics.count_ngrams().
"""
if (self.dtype() != str):
raise TypeError("Only SArray of string type is supported for counting bag of words")
_mt._get_metric_tracker().track('sarray.count_words')
# construct options, will extend over time
options = dict()
options["to_lower"] = to_lower == True
with cython_context():
return SArray(_proxy=self.__proxy__.count_bag_of_words(options))
def _count_ngrams(self, n=2, method="word", to_lower=True, ignore_space=True):
"""
For documentation, see graphlab.text_analytics.count_ngrams().
"""
if (self.dtype() != str):
raise TypeError("Only SArray of string type is supported for counting n-grams")
if (type(n) != int):
raise TypeError("Input 'n' must be of type int")
if (n < 1):
raise ValueError("Input 'n' must be greater than 0")
if (n > 5):
warnings.warn("It is unusual for n-grams to be of size larger than 5.")
_mt._get_metric_tracker().track('sarray.count_ngrams', properties={'n':n, 'method':method})
# construct options, will extend over time
options = dict()
options["to_lower"] = to_lower == True
options["ignore_space"] = ignore_space == True
if method == "word":
with cython_context():
return SArray(_proxy=self.__proxy__.count_ngrams(n, options ))
elif method == "character" :
with cython_context():
return SArray(_proxy=self.__proxy__.count_character_ngrams(n, options ))
else:
raise ValueError("Invalid 'method' input value. Please input either 'word' or 'character' ")
def dict_trim_by_keys(self, keys, exclude=True):
"""
Filter an SArray of dictionary type by the given keys. By default, all
keys that are in the provided list in ``keys`` are *excluded* from the
returned SArray.
Parameters
----------
keys : list
A collection of keys to trim down the elements in the SArray.
exclude : bool, optional
If True, all keys that are in the input key list are removed. If
False, only keys that are in the input key list are retained.
Returns
-------
out : SArray
A SArray of dictionary type, with each dictionary element trimmed
according to the input criteria.
See Also
--------
dict_trim_by_values
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":1, "dog":2},
{"this": 2, "are": 2, "cat": 1}])
>>> sa.dict_trim_by_keys(["this", "is", "and", "are"], exclude=True)
dtype: dict
Rows: 2
[{'dog': 2}, {'cat': 1}]
"""
if isinstance(keys, str) or (not hasattr(keys, "__iter__")):
keys = [keys]
_mt._get_metric_tracker().track('sarray.dict_trim_by_keys')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_trim_by_keys(keys, exclude))
def dict_trim_by_values(self, lower=None, upper=None):
"""
Filter dictionary values to a given range (inclusive). Trimming is only
performed on values which can be compared to the bound values. Fails on
SArrays whose data type is not ``dict``.
Parameters
----------
lower : int or long or float, optional
The lowest dictionary value that would be retained in the result. If
not given, lower bound is not applied.
upper : int or long or float, optional
The highest dictionary value that would be retained in the result.
If not given, upper bound is not applied.
Returns
-------
out : SArray
An SArray of dictionary type, with each dict element trimmed
according to the input criteria.
See Also
--------
dict_trim_by_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_trim_by_values(2,5)
dtype: dict
Rows: 2
[{'is': 5}, {'this': 2, 'cat': 5}]
>>> sa.dict_trim_by_values(upper=5)
dtype: dict
Rows: 2
[{'this': 1, 'is': 5}, {'this': 2, 'are': 1, 'cat': 5}]
"""
        if lower is not None and (not is_numeric_type(type(lower))):
            raise TypeError("lower bound has to be a numeric value")
        if upper is not None and (not is_numeric_type(type(upper))):
            raise TypeError("upper bound has to be a numeric value")
_mt._get_metric_tracker().track('sarray.dict_trim_by_values')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_trim_by_values(lower, upper))
def dict_keys(self):
"""
Create an SArray that contains all the keys from each dictionary
element as a list. Fails on SArrays whose data type is not ``dict``.
Returns
-------
out : SArray
A SArray of list type, where each element is a list of keys
from the input SArray element.
See Also
--------
dict_values
Examples
---------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_keys()
dtype: list
Rows: 2
[['this', 'is', 'dog'], ['this', 'are', 'cat']]
"""
_mt._get_metric_tracker().track('sarray.dict_keys')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_keys())
def dict_values(self):
"""
Create an SArray that contains all the values from each dictionary
element as a list. Fails on SArrays whose data type is not ``dict``.
Returns
-------
out : SArray
A SArray of list type, where each element is a list of values
from the input SArray element.
See Also
--------
dict_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_values()
dtype: list
Rows: 2
[[1, 5, 7], [2, 1, 5]]
"""
_mt._get_metric_tracker().track('sarray.dict_values')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_values())
def dict_has_any_keys(self, keys):
"""
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has any of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains any key in the input list.
See Also
--------
dict_has_all_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7}, {"animal":1},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_any_keys(["is", "this", "are"])
dtype: int
Rows: 3
[1, 1, 0]
"""
if isinstance(keys, str) or (not hasattr(keys, "__iter__")):
keys = [keys]
_mt._get_metric_tracker().track('sarray.dict_has_any_keys')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_has_any_keys(keys))
def dict_has_all_keys(self, keys):
"""
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has all of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains all keys in the input list.
See Also
--------
dict_has_any_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_all_keys(["is", "this"])
dtype: int
Rows: 2
[1, 0]
"""
if isinstance(keys, str) or (not hasattr(keys, "__iter__")):
keys = [keys]
_mt._get_metric_tracker().track('sarray.dict_has_all_keys')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_has_all_keys(keys))
def apply(self, fn, dtype=None, skip_undefined=True, seed=None,
_lua_translate=False):
"""
apply(fn, dtype=None, skip_undefined=True, seed=None)
Transform each element of the SArray by a given function. The result
SArray is of type ``dtype``. ``fn`` should be a function that returns
exactly one value which can be cast into the type specified by
``dtype``. If ``dtype`` is not specified, the first 100 elements of the
SArray are used to make a guess about the data type.
Parameters
----------
fn : function
The function to transform each element. Must return exactly one
value which can be cast into the type specified by ``dtype``.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
dtype : {None, int, float, str, list, array.array, dict, graphlab.Image}, optional
The data type of the new SArray. If ``None``, the first 100 elements
of the array are used to guess the target data type.
skip_undefined : bool, optional
If True, will not apply ``fn`` to any undefined values.
seed : int, optional
Used as the seed if a random number generator is included in ``fn``.
Returns
-------
out : SArray
The SArray transformed by ``fn``. Each element of the SArray is of
type ``dtype``.
See Also
--------
SFrame.apply
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.apply(lambda x: x*2)
dtype: int
Rows: 3
[2, 4, 6]
Using native toolkit extension function:
.. code-block:: c++
#include <graphlab/sdk/toolkit_function_macros.hpp>
#include <cmath>
using namespace graphlab;
double logx(const flexible_type& x, double base) {
return log((double)(x)) / log(base);
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(logx, "x", "base");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> import example
>>> sa = graphlab.SArray([1,2,4])
>>> sa.apply(lambda x: example.logx(x, 2))
dtype: float
Rows: 3
[0.0, 1.0, 2.0]
"""
if (type(fn) == str):
fn = "LUA" + fn
if dtype == None:
raise TypeError("dtype must be specified for a lua function")
else:
assert _is_callable(fn), "Input must be a function"
dryrun = [fn(i) for i in self.head(100) if i is not None]
import traceback
if dtype == None:
dtype = infer_type_of_list(dryrun)
if not seed:
seed = time.time()
# log metric
_mt._get_metric_tracker().track('sarray.apply')
# First phase test if it is a toolkit function
nativefn = None
try:
import graphlab.extensions as extensions
nativefn = extensions._build_native_function_call(fn)
except:
# failure are fine. we just fall out into the next few phases
pass
if nativefn is not None:
# this is a toolkit lambda. We can do something about it
with cython_context():
return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, skip_undefined, seed))
# Second phase. Try lua compilation if possible
try:
# try compilation
if _lua_translate:
# its a function
print "Attempting Lua Translation"
import graphlab.Lua_Translator
import ast
import StringIO
def isalambda(v):
return isinstance(v, type(lambda: None)) and v.__name__ == '<lambda>'
output = StringIO.StringIO()
translator = gl.Lua_Translator.translator_NodeVisitor(output)
ast_node = None
try:
if not isalambda(fn):
ast_node = ast.parse(inspect.getsource(fn))
translator.rename_function[fn.__name__] = "__lambda__transfer__"
except:
pass
try:
if ast_node == None:
print "Cannot translate. Trying again from byte code decompilation"
ast_node = meta.decompiler.decompile_func(fn)
translator.rename_function[""] = "__lambda__transfer__"
except:
pass
if ast_node == None:
raise ValueError("Unable to get source of function")
ftype = gl.Lua_Translator.FunctionType()
selftype = self.dtype()
if selftype == list:
ftype.input_type = tuple([[]])
elif selftype == dict:
ftype.input_type = tuple([{}])
elif selftype == array.array:
ftype.input_type = tuple([[float]])
else:
ftype.input_type = tuple([selftype])
translator.function_known_types["__lambda__transfer__"] = ftype
translator.translate_ast(ast_node)
print "Lua Translation Success"
print output.getvalue()
fn = "LUA" + output.getvalue()
except Exception as e:
print traceback.format_exc()
print "Lua Translation Failed"
print e
except:
print traceback.format_exc()
print "Lua Translation Failed"
with cython_context():
return SArray(_proxy=self.__proxy__.transform(fn, dtype, skip_undefined, seed))
def filter(self, fn, skip_undefined=True, seed=None):
"""
Filter this SArray by a function.
        Returns a new SArray filtered by the given function. If `fn` evaluates an
element to true, this element is copied to the new SArray. If not, it
isn't. Throws an exception if the return type of `fn` is not castable
to a boolean value.
Parameters
----------
fn : function
Function that filters the SArray. Must evaluate to bool or int.
skip_undefined : bool, optional
If True, will not apply fn to any undefined values.
seed : int, optional
Used as the seed if a random number generator is included in fn.
Returns
-------
out : SArray
            The SArray filtered by ``fn``. Each element of the output SArray is
            of the same type as the original SArray.
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.filter(lambda x: x < 3)
dtype: int
Rows: 2
[1, 2]
"""
assert inspect.isfunction(fn), "Input must be a function"
if not seed:
seed = time.time()
_mt._get_metric_tracker().track('sarray.filter')
with cython_context():
return SArray(_proxy=self.__proxy__.filter(fn, skip_undefined, seed))
def sample(self, fraction, seed=None):
"""
Create an SArray which contains a subsample of the current SArray.
Parameters
----------
fraction : float
The fraction of the rows to fetch. Must be between 0 and 1.
seed : int
The random seed for the random number generator.
Returns
-------
out : SArray
The new SArray which contains the subsampled rows.
Examples
--------
>>> sa = graphlab.SArray(range(10))
>>> sa.sample(.3)
dtype: int
Rows: 3
[2, 6, 9]
"""
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.size() == 0):
return SArray()
if not seed:
seed = time.time()
_mt._get_metric_tracker().track('sarray.sample')
with cython_context():
return SArray(_proxy=self.__proxy__.sample(fraction, seed))
def _save_as_text(self, url):
"""
Save the SArray to disk as text file.
"""
raise NotImplementedError
def all(self):
"""
        Return True if every element of the SArray evaluates to True. For
numeric SArrays zeros and missing values (``None``) evaluate to False,
while all non-zero, non-missing values evaluate to True. For string,
list, and dictionary SArrays, empty values (zero length strings, lists
or dictionaries) or missing values (``None``) evaluate to False. All
other values evaluate to True.
Returns True on an empty SArray.
Returns
-------
out : bool
See Also
--------
any
Examples
--------
>>> graphlab.SArray([1, None]).all()
False
>>> graphlab.SArray([1, 0]).all()
False
>>> graphlab.SArray([1, 2]).all()
True
>>> graphlab.SArray(["hello", "world"]).all()
True
>>> graphlab.SArray(["hello", ""]).all()
False
>>> graphlab.SArray([]).all()
True
"""
with cython_context():
return self.__proxy__.all()
def any(self):
"""
Return True if any element of the SArray evaluates to True. For numeric
SArrays any non-zero value evaluates to True. For string, list, and
dictionary SArrays, any element of non-zero length evaluates to True.
Returns False on an empty SArray.
Returns
-------
out : bool
See Also
--------
all
Examples
--------
>>> graphlab.SArray([1, None]).any()
True
>>> graphlab.SArray([1, 0]).any()
True
>>> graphlab.SArray([0, 0]).any()
False
>>> graphlab.SArray(["hello", "world"]).any()
True
>>> graphlab.SArray(["hello", ""]).any()
True
>>> graphlab.SArray(["", ""]).any()
False
>>> graphlab.SArray([]).any()
False
"""
with cython_context():
return self.__proxy__.any()
def max(self):
"""
Get maximum numeric value in SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type.
Returns
-------
out : type of SArray
Maximum value of SArray
See Also
--------
min
Examples
--------
>>> graphlab.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).max()
96
"""
with cython_context():
return self.__proxy__.max()
def min(self):
"""
Get minimum numeric value in SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type.
Returns
-------
out : type of SArray
Minimum value of SArray
See Also
--------
max
Examples
--------
        >>> graphlab.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).min()
        5
"""
with cython_context():
return self.__proxy__.min()
def sum(self):
"""
Sum of all values in this SArray.
Raises an exception if called on an SArray of strings, lists, or
dictionaries. If the SArray contains numeric arrays (array.array) and
all the arrays are the same length, the sum over all the arrays will be
returned. Returns None on an empty SArray. For large values, this may
overflow without warning.
Returns
-------
out : type of SArray
Sum of all values in SArray
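        Examples
        --------
        Illustrative example (output follows from the definition above):
        >>> graphlab.SArray([1, 2, 3]).sum()
        6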
"""
with cython_context():
return self.__proxy__.sum()
def mean(self):
"""
Mean of all the values in the SArray, or mean image.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or non-Image type.
Returns
-------
out : float | graphlab.Image
Mean of all values in SArray, or image holding per-pixel mean
across the input SArray.
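        Examples
        --------
        Illustrative example for the numeric case:
        >>> graphlab.SArray([1, 2, 3]).mean()
        2.0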
"""
with cython_context():
if self.dtype() == gl.Image:
import graphlab.extensions as extensions
return extensions.generate_mean(self)
else:
return self.__proxy__.mean()
def std(self, ddof=0):
"""
Standard deviation of all the values in the SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or if `ddof` >= length of SArray.
Parameters
----------
ddof : int, optional
"delta degrees of freedom" in the variance calculation.
Returns
-------
out : float
The standard deviation of all the values.
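        Examples
        --------
        Illustrative example (the population standard deviation of [1, 2, 3],
        with ddof=0, is sqrt(2/3)):
        >>> graphlab.SArray([1, 2, 3]).std()
        0.816496580927726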
"""
with cython_context():
return self.__proxy__.std(ddof)
def var(self, ddof=0):
"""
Variance of all the values in the SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or if `ddof` >= length of SArray.
Parameters
----------
ddof : int, optional
"delta degrees of freedom" in the variance calculation.
Returns
-------
out : float
Variance of all values in SArray.
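        Examples
        --------
        Illustrative example (population variance, ddof=0):
        >>> graphlab.SArray([1, 2, 3]).var()
        0.6666666666666666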
"""
with cython_context():
return self.__proxy__.var(ddof)
def num_missing(self):
"""
Number of missing elements in the SArray.
Returns
-------
out : int
Number of missing values.
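        Examples
        --------
        Illustrative example:
        >>> graphlab.SArray([1, None, 3, None]).num_missing()
        2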
"""
with cython_context():
return self.__proxy__.num_missing()
def nnz(self):
"""
Number of non-zero elements in the SArray.
Returns
-------
out : int
Number of non-zero elements.
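        Examples
        --------
        Illustrative example:
        >>> graphlab.SArray([0, 1, 0, 2]).nnz()
        2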
"""
with cython_context():
return self.__proxy__.nnz()
def datetime_to_str(self,str_format="%Y-%m-%dT%H:%M:%S%ZP"):
"""
Create a new SArray with all the values cast to str. The string format is
specified by the 'str_format' parameter.
Parameters
----------
str_format : str
The format to output the string. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
Returns
-------
out : SArray[str]
The SArray converted to the type 'str'.
Examples
--------
>>> dt = datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5))
>>> sa = graphlab.SArray([dt])
>>> sa.datetime_to_str("%e %b %Y %T %ZP")
dtype: str
Rows: 1
[20 Oct 2011 09:30:10 GMT-05:00]
See Also
----------
str_to_datetime
References
----------
[1] Boost date time from string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
"""
if(self.dtype() != datetime.datetime):
raise TypeError("datetime_to_str expects SArray of datetime as input SArray")
_mt._get_metric_tracker().track('sarray.datetime_to_str')
with cython_context():
return SArray(_proxy=self.__proxy__.datetime_to_str(str_format))
def str_to_datetime(self,str_format="%Y-%m-%dT%H:%M:%S%ZP"):
"""
Create a new SArray with all the values cast to datetime. The string format is
specified by the 'str_format' parameter.
Parameters
----------
str_format : str
The string format of the input SArray. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
Returns
-------
out : SArray[datetime.datetime]
The SArray converted to the type 'datetime'.
Examples
--------
>>> sa = graphlab.SArray(["20-Oct-2011 09:30:10 GMT-05:30"])
>>> sa.str_to_datetime("%d-%b-%Y %H:%M:%S %ZP")
dtype: datetime
Rows: 1
datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5.5))
See Also
----------
datetime_to_str
References
----------
[1] boost date time to string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
"""
if(self.dtype() != str):
raise TypeError("str_to_datetime expects SArray of str as input SArray")
_mt._get_metric_tracker().track('sarray.str_to_datetime')
with cython_context():
return SArray(_proxy=self.__proxy__.str_to_datetime(str_format))
def pixel_array_to_image(self, width, height, channels, undefined_on_failure=True, allow_rounding=False):
"""
Create a new SArray with all the values cast to :py:class:`graphlab.image.Image`
of uniform size.
Parameters
----------
width: int
The width of the new images.
height: int
The height of the new images.
channels: int.
Number of channels of the new images.
undefined_on_failure: bool , optional , default True
If True, return None type instead of Image type in failure instances.
If False, raises error upon failure.
allow_rounding: bool, optional , default False
If True, rounds non-integer values when converting to Image type.
If False, raises error upon rounding.
Returns
-------
out : SArray[graphlab.Image]
The SArray converted to the type 'graphlab.Image'.
See Also
--------
astype, str_to_datetime, datetime_to_str
Examples
--------
The MNIST data is scaled from 0 to 1, but our image type only loads integer pixel values
from 0 to 255. If we just convert without scaling, all values below one would be cast to
0.
>>> mnist_array = graphlab.SArray('http://s3.amazonaws.com/dato-datasets/mnist/mnist_vec_sarray')
>>> scaled_mnist_array = mnist_array * 255
>>> mnist_img_sarray = gl.SArray.pixel_array_to_image(scaled_mnist_array, 28, 28, 1, allow_rounding = True)
"""
if(self.dtype() != array.array):
            raise TypeError("pixel_array_to_image expects an SArray of arrays as input")
num_to_test = 10
num_test = min(self.size(), num_to_test)
mod_values = [val % 1 for x in range(num_test) for val in self[x]]
out_of_range_values = [(val > 255 or val < 0) for x in range(num_test) for val in self[x]]
if sum(mod_values) != 0.0 and not allow_rounding:
            raise ValueError("There are non-integer values in the array data. Images only support integer data values between 0 and 255. To permit rounding, set the 'allow_rounding' parameter to True.")
if sum(out_of_range_values) != 0:
raise ValueError("There are values outside the range of 0 to 255. Images only support integer data values between 0 and 255.")
_mt._get_metric_tracker().track('sarray.pixel_array_to_img')
import graphlab.extensions as extensions
return extensions.vector_sarray_to_image_sarray(self, width, height, channels, undefined_on_failure)
def _head_str(self, num_rows):
"""
        Returns the head of the SArray cast to string.
"""
import graphlab.extensions as extensions
return extensions._head_str(self, num_rows)
def astype(self, dtype, undefined_on_failure=False):
"""
Create a new SArray with all values cast to the given type. Throws an
exception if the types are not castable to the given type.
Parameters
----------
dtype : {int, float, str, list, array.array, dict, datetime.datetime}
The type to cast the elements to in SArray
undefined_on_failure: bool, optional
If set to True, runtime cast failures will be emitted as missing
values rather than failing.
Returns
-------
out : SArray [dtype]
The SArray converted to the type ``dtype``.
Notes
-----
- The string parsing techniques used to handle conversion to dictionary
and list types are quite generic and permit a variety of interesting
formats to be interpreted. For instance, a JSON string can usually be
interpreted as a list or a dictionary type. See the examples below.
- For datetime-to-string and string-to-datetime conversions,
use sa.datetime_to_str() and sa.str_to_datetime() functions.
- For array.array to graphlab.Image conversions, use sa.pixel_array_to_image()
Examples
--------
>>> sa = graphlab.SArray(['1','2','3','4'])
>>> sa.astype(int)
dtype: int
Rows: 4
[1, 2, 3, 4]
Given an SArray of strings that look like dicts, convert to a dictionary
type:
>>> sa = graphlab.SArray(['{1:2 3:4}', '{a:b c:d}'])
>>> sa.astype(dict)
dtype: dict
Rows: 2
[{1: 2, 3: 4}, {'a': 'b', 'c': 'd'}]
"""
_mt._get_metric_tracker().track('sarray.astype.%s' % str(dtype.__name__))
if (dtype == gl.Image) and (self.dtype() == array.array):
            raise TypeError("Cannot cast from array type to image type with sarray.astype(). Please use sarray.pixel_array_to_image() instead.")
with cython_context():
return SArray(_proxy=self.__proxy__.astype(dtype, undefined_on_failure))
def clip(self, lower=float('nan'), upper=float('nan')):
"""
Create a new SArray with each value clipped to be within the given
bounds.
In this case, "clipped" means that values below the lower bound will be
set to the lower bound value. Values above the upper bound will be set
to the upper bound value. This function can operate on SArrays of
numeric type as well as array type, in which case each individual
element in each array is clipped. By default ``lower`` and ``upper`` are
set to ``float('nan')`` which indicates the respective bound should be
ignored. The method fails if invoked on an SArray of non-numeric type.
Parameters
----------
lower : int, optional
The lower bound used to clip. Ignored if equal to ``float('nan')``
(the default).
upper : int, optional
The upper bound used to clip. Ignored if equal to ``float('nan')``
(the default).
Returns
-------
out : SArray
See Also
--------
clip_lower, clip_upper
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.clip(2,2)
dtype: int
Rows: 3
[2, 2, 2]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.clip(lower, upper))
def clip_lower(self, threshold):
"""
Create new SArray with all values clipped to the given lower bound. This
function can operate on numeric arrays, as well as vector arrays, in
which case each individual element in each vector is clipped. Throws an
exception if the SArray is empty or the types are non-numeric.
Parameters
----------
threshold : float
The lower bound used to clip values.
Returns
-------
out : SArray
See Also
--------
clip, clip_upper
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.clip_lower(2)
dtype: int
Rows: 3
[2, 2, 3]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.clip(threshold, float('nan')))
def clip_upper(self, threshold):
"""
Create new SArray with all values clipped to the given upper bound. This
function can operate on numeric arrays, as well as vector arrays, in
which case each individual element in each vector is clipped.
Parameters
----------
threshold : float
The upper bound used to clip values.
Returns
-------
out : SArray
See Also
--------
clip, clip_lower
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.clip_upper(2)
dtype: int
Rows: 3
[1, 2, 2]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.clip(float('nan'), threshold))
def tail(self, n=10):
"""
Get an SArray that contains the last n elements in the SArray.
Parameters
----------
n : int
The number of elements to fetch
Returns
-------
out : SArray
A new SArray which contains the last n rows of the current SArray.
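        Examples
        --------
        Illustrative example:
        >>> graphlab.SArray(range(10)).tail(3)
        dtype: int
        Rows: 3
        [7, 8, 9]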
"""
with cython_context():
return SArray(_proxy=self.__proxy__.tail(n))
def dropna(self):
"""
Create new SArray containing only the non-missing values of the
SArray.
A missing value shows up in an SArray as 'None'. This will also drop
float('nan').
Returns
-------
out : SArray
The new SArray with missing values removed.
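        Examples
        --------
        Illustrative example:
        >>> graphlab.SArray([1, None, 3]).dropna()
        dtype: int
        Rows: 2
        [1, 3]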
"""
_mt._get_metric_tracker().track('sarray.dropna')
with cython_context():
return SArray(_proxy = self.__proxy__.drop_missing_values())
def fillna(self, value):
"""
Create new SArray with all missing values (None or NaN) filled in
with the given value.
The size of the new SArray will be the same as the original SArray. If
the given value is not the same type as the values in the SArray,
`fillna` will attempt to convert the value to the original SArray's
type. If this fails, an error will be raised.
Parameters
----------
value : type convertible to SArray's type
The value used to replace all missing values
Returns
-------
out : SArray
A new SArray with all missing values filled
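        Examples
        --------
        Illustrative example (0 is convertible to the SArray's int type):
        >>> graphlab.SArray([1, None, 3]).fillna(0)
        dtype: int
        Rows: 3
        [1, 0, 3]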
"""
_mt._get_metric_tracker().track('sarray.fillna')
with cython_context():
return SArray(_proxy = self.__proxy__.fill_missing_values(value))
def topk_index(self, topk=10, reverse=False):
"""
Create an SArray indicating which elements are in the top k.
Entries are '1' if the corresponding element in the current SArray is a
part of the top k elements, and '0' if that corresponding element is
not. Order is descending by default.
Parameters
----------
topk : int
            The number of elements to consider as part of the 'top k'.
reverse: bool
If True, return the topk elements in ascending order
Returns
-------
out : SArray (of type int)
Notes
-----
This is used internally by SFrame's topk function.
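        Examples
        --------
        Illustrative example (the two largest values, 40 and 30, are flagged):
        >>> graphlab.SArray([10, 40, 20, 30]).topk_index(topk=2)
        dtype: int
        Rows: 4
        [0, 1, 0, 1]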
"""
with cython_context():
return SArray(_proxy = self.__proxy__.topk_index(topk, reverse))
def sketch_summary(self, background=False, sub_sketch_keys=None):
"""
Summary statistics that can be calculated with one pass over the SArray.
Returns a graphlab.Sketch object which can be further queried for many
descriptive statistics over this SArray. Many of the statistics are
approximate. See the :class:`~graphlab.Sketch` documentation for more
detail.
Parameters
----------
background : boolean, optional
If True, the sketch construction will return immediately and the
sketch will be constructed in the background. While this is going on,
the sketch can be queried incrementally, but at a performance penalty.
Defaults to False.
sub_sketch_keys: int | str | list of int | list of str, optional
For SArray of dict type, also constructs sketches for a given set of keys,
For SArray of array type, also constructs sketches for the given indexes.
The sub sketches may be queried using:
:py:func:`~graphlab.Sketch.element_sub_sketch()`
Defaults to None in which case no subsketches will be constructed.
Returns
-------
out : Sketch
Sketch object that contains descriptive statistics for this SArray.
Many of the statistics are approximate.
"""
from graphlab.data_structures.sketch import Sketch
if (self.dtype() == gl.data_structures.image.Image):
raise TypeError("sketch_summary() is not supported for arrays of image type")
if (type(background) != bool):
raise TypeError("'background' parameter has to be a boolean value")
if (sub_sketch_keys != None):
if (self.dtype() != dict and self.dtype() != array.array):
raise TypeError("sub_sketch_keys is only supported for SArray of dictionary or array type")
if not hasattr(sub_sketch_keys, "__iter__"):
sub_sketch_keys = [sub_sketch_keys]
value_types = set([type(i) for i in sub_sketch_keys])
if (len(value_types) != 1):
raise ValueError("sub_sketch_keys member values need to have the same type.")
value_type = value_types.pop();
if (self.dtype() == dict and value_type != str):
raise TypeError("Only string value(s) can be passed to sub_sketch_keys for SArray of dictionary type. "+
"For dictionary types, sketch summary is computed by casting keys to string values.")
if (self.dtype() == array.array and value_type != int):
raise TypeError("Only int value(s) can be passed to sub_sketch_keys for SArray of array type")
else:
sub_sketch_keys = list()
_mt._get_metric_tracker().track('sarray.sketch_summary')
return Sketch(self, background, sub_sketch_keys = sub_sketch_keys)
def append(self, other):
"""
Append an SArray to the current SArray. Creates a new SArray with the
rows from both SArrays. Both SArrays must be of the same type.
Parameters
----------
other : SArray
Another SArray whose rows are appended to current SArray.
Returns
-------
out : SArray
A new SArray that contains rows from both SArrays, with rows from
the ``other`` SArray coming after all rows from the current SArray.
See Also
--------
SFrame.append
Examples
--------
>>> sa = graphlab.SArray([1, 2, 3])
>>> sa2 = graphlab.SArray([4, 5, 6])
>>> sa.append(sa2)
dtype: int
Rows: 6
[1, 2, 3, 4, 5, 6]
"""
_mt._get_metric_tracker().track('sarray.append')
if type(other) is not SArray:
raise RuntimeError("SArray append can only work with SArray")
if self.dtype() != other.dtype():
raise RuntimeError("Data types in both SArrays have to be the same")
with cython_context():
other.__materialize__()
return SArray(_proxy = self.__proxy__.append(other.__proxy__))
def unique(self):
"""
Get all unique values in the current SArray.
Raises a TypeError if the SArray is of dictionary type. Will not
necessarily preserve the order of the given SArray in the new SArray.
Returns
-------
out : SArray
A new SArray that contains the unique values of the current SArray.
See Also
--------
SFrame.unique
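        Examples
        --------
        Illustrative example (the order of the result is not guaranteed, so it
        is sorted here for display):
        >>> sa = graphlab.SArray([1, 2, 2, 3, 3, 3])
        >>> sorted(sa.unique())
        [1, 2, 3]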
"""
_mt._get_metric_tracker().track('sarray.unique')
tmp_sf = gl.SFrame()
tmp_sf.add_column(self, 'X1')
res = tmp_sf.groupby('X1',{})
return SArray(_proxy=res['X1'].__proxy__)
@gl._check_canvas_enabled
def show(self, view=None):
"""
show(view=None)
Visualize the SArray with GraphLab Create :mod:`~graphlab.canvas`. This function starts Canvas
if it is not already running. If the SArray has already been plotted,
this function will update the plot.
Parameters
----------
view : str, optional
The name of the SFrame view to show. Can be one of:
- None: Use the default (depends on the dtype of the SArray).
- 'Categorical': Shows most frequent items in this SArray, sorted
by frequency. Only valid for str, int, or float dtypes.
- 'Numeric': Shows a histogram (distribution of values) for the
SArray. Only valid for int or float dtypes.
- 'Dictionary': Shows a cross filterable list of keys (categorical)
and values (categorical or numeric). Only valid for dict dtype.
- 'Array': Shows a Numeric view, filterable by sub-column (index).
Only valid for array.array dtype.
- 'List': Shows a Categorical view, aggregated across all sub-
columns (indices). Only valid for list dtype.
Returns
-------
view : graphlab.canvas.view.View
An object representing the GraphLab Canvas view
See Also
--------
canvas
Examples
--------
Suppose 'sa' is an SArray, we can view it in GraphLab Canvas using:
>>> sa.show()
If 'sa' is a numeric (int or float) SArray, we can view it as
a categorical variable using:
>>> sa.show(view='Categorical')
"""
import graphlab.canvas
import graphlab.canvas.inspect
import graphlab.canvas.views.sarray
graphlab.canvas.inspect.find_vars(self)
return graphlab.canvas.show(graphlab.canvas.views.sarray.SArrayView(self, params={
'view': view
}))
def item_length(self):
"""
Length of each element in the current SArray.
Only works on SArrays of dict, array, or list type. If a given element
        is a missing value, then the output element is also a missing value.
This function is equivalent to the following but more performant:
sa_item_len = sa.apply(lambda x: len(x) if x is not None else None)
Returns
-------
out_sf : SArray
A new SArray, each element in the SArray is the len of the corresponding
items in original SArray.
Examples
--------
>>> sa = SArray([
... {"is_restaurant": 1, "is_electronics": 0},
... {"is_restaurant": 1, "is_retail": 1, "is_electronics": 0},
... {"is_restaurant": 0, "is_retail": 1, "is_electronics": 0},
... {"is_restaurant": 0},
... {"is_restaurant": 1, "is_electronics": 1},
... None])
>>> sa.item_length()
dtype: int
Rows: 6
[2, 3, 3, 1, 2, None]
"""
if (self.dtype() not in [list, dict, array.array]):
raise TypeError("item_length() is only applicable for SArray of type list, dict and array.")
_mt._get_metric_tracker().track('sarray.item_length')
with cython_context():
return SArray(_proxy = self.__proxy__.item_length())
def split_datetime(self, column_name_prefix = "X", limit=None, tzone=False):
"""
        Splits an SArray of datetime type into multiple columns, returning a
        new SFrame that contains the expanded columns. An SArray of datetime
        will be split by default into an SFrame of 6 columns, one for each
        year/month/day/hour/minute/second element.
column naming:
When splitting a SArray of datetime type, new columns are named:
prefix.year, prefix.month, etc. The prefix is set by the parameter
"column_name_prefix" and defaults to 'X'. If column_name_prefix is
None or empty, then no prefix is used.
Timezone column:
        If the tzone parameter is True, timezone information is represented
        as one additional float column that shows the offset from GMT(0.0),
        i.e. from UTC.
Parameters
----------
column_name_prefix: str, optional
If provided, expanded column names would start with the given prefix.
Defaults to "X".
limit: list[str], optional
Limits the set of datetime elements to expand.
Elements are 'year','month','day','hour','minute',
and 'second'.
tzone: bool, optional
A boolean parameter that determines whether to show timezone column or not.
Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains all expanded columns
Examples
--------
To expand only day and year elements of a datetime SArray
>>> sa = SArray(
[datetime(2011, 1, 21, 7, 7, 21, tzinfo=GMT(0)),
         datetime(2010, 2, 5, 7, 8, 21, tzinfo=GMT(4.5))])
>>> sa.split_datetime(column_name_prefix=None,limit=['day','year'])
Columns:
day int
year int
Rows: 2
Data:
+-------+--------+
| day | year |
+-------+--------+
| 21 | 2011 |
| 5 | 2010 |
+-------+--------+
[2 rows x 2 columns]
To expand only year and tzone elements of a datetime SArray
        with the tzone column represented as a float. Columns are named with the prefix:
'Y.column_name'.
>>> sa.split_datetime(column_name_prefix="Y",limit=['year'],tzone=True)
Columns:
Y.year int
Y.tzone float
Rows: 2
Data:
+----------+---------+
| Y.year | Y.tzone |
+----------+---------+
| 2011 | 0.0 |
| 2010 | 4.5 |
+----------+---------+
[2 rows x 2 columns]
"""
if self.dtype() != datetime.datetime:
raise TypeError("Only column of datetime type is supported.")
if column_name_prefix == None:
column_name_prefix = ""
if type(column_name_prefix) != str:
raise TypeError("'column_name_prefix' must be a string")
# convert limit to column_keys
if limit != None:
if (not hasattr(limit, '__iter__')):
raise TypeError("'limit' must be a list");
name_types = set([type(i) for i in limit])
if (len(name_types) != 1):
raise TypeError("'limit' contains values that are different types")
if (name_types.pop() != str):
raise TypeError("'limit' must contain string values.")
if len(set(limit)) != len(limit):
raise ValueError("'limit' contains duplicate values")
column_types = []
if(limit != None):
column_types = list()
for i in limit:
column_types.append(int);
else:
limit = ['year','month','day','hour','minute','second']
column_types = [int, int, int, int, int, int]
if(tzone == True):
limit += ['tzone']
column_types += [float]
_mt._get_metric_tracker().track('sarray.split_datetime')
with cython_context():
return gl.SFrame(_proxy=self.__proxy__.expand(column_name_prefix, limit, column_types))
def unpack(self, column_name_prefix = "X", column_types=None, na_value=None, limit=None):
"""
Convert an SArray of list, array, or dict type to an SFrame with
multiple columns.
`unpack` expands an SArray using the values of each list/array/dict as
elements in a new SFrame of multiple columns. For example, an SArray of
lists each of length 4 will be expanded into an SFrame of 4 columns,
one for each list element. An SArray of lists/arrays of varying size
        will be expanded to a number of columns equal to the longest list/array.
An SArray of dictionaries will be expanded into as many columns as
there are keys.
When unpacking an SArray of list or array type, new columns are named:
`column_name_prefix`.0, `column_name_prefix`.1, etc. If unpacking a
column of dict type, unpacked columns are named
`column_name_prefix`.key1, `column_name_prefix`.key2, etc.
When unpacking an SArray of list or dictionary types, missing values in
the original element remain as missing values in the resultant columns.
If the `na_value` parameter is specified, all values equal to this
given value are also replaced with missing values. In an SArray of
array.array type, NaN is interpreted as a missing value.
        :py:func:`graphlab.SFrame.pack_columns()` reverses the effect of unpack.
Parameters
----------
column_name_prefix: str, optional
If provided, unpacked column names would start with the given prefix.
column_types: list[type], optional
Column types for the unpacked columns. If not provided, column
types are automatically inferred from first 100 rows. Defaults to
None.
na_value: optional
Convert all values that are equal to `na_value` to
missing value if specified.
limit: list, optional
Limits the set of list/array/dict keys to unpack.
For list/array SArrays, 'limit' must contain integer indices.
For dict SArray, 'limit' must contain dictionary keys.
Returns
-------
out : SFrame
A new SFrame that contains all unpacked columns
Examples
--------
To unpack a dict SArray
>>> sa = SArray([{ 'word': 'a', 'count': 1},
... { 'word': 'cat', 'count': 2},
... { 'word': 'is', 'count': 3},
... { 'word': 'coming','count': 4}])
Normal case of unpacking SArray of type dict:
>>> sa.unpack(column_name_prefix=None)
Columns:
count int
word str
<BLANKLINE>
Rows: 4
<BLANKLINE>
Data:
+-------+--------+
| count | word |
+-------+--------+
| 1 | a |
| 2 | cat |
| 3 | is |
| 4 | coming |
+-------+--------+
[4 rows x 2 columns]
<BLANKLINE>
Unpack only keys with 'word':
>>> sa.unpack(limit=['word'])
Columns:
X.word str
<BLANKLINE>
Rows: 4
<BLANKLINE>
Data:
+--------+
| X.word |
+--------+
| a |
| cat |
| is |
| coming |
+--------+
[4 rows x 1 columns]
<BLANKLINE>
>>> sa2 = SArray([
... [1, 0, 1],
... [1, 1, 1],
... [0, 1]])
Convert all zeros to missing values:
>>> sa2.unpack(column_types=[int, int, int], na_value=0)
Columns:
X.0 int
X.1 int
X.2 int
<BLANKLINE>
Rows: 3
<BLANKLINE>
Data:
+------+------+------+
| X.0 | X.1 | X.2 |
+------+------+------+
| 1 | None | 1 |
| 1 | 1 | 1 |
| None | 1 | None |
+------+------+------+
[3 rows x 3 columns]
<BLANKLINE>
"""
if self.dtype() not in [dict, array.array, list]:
raise TypeError("Only SArray of dict/list/array type supports unpack")
if column_name_prefix == None:
column_name_prefix = ""
if type(column_name_prefix) != str:
raise TypeError("'column_name_prefix' must be a string")
        # validate 'limit'
if limit != None:
if (not hasattr(limit, '__iter__')):
raise TypeError("'limit' must be a list");
name_types = set([type(i) for i in limit])
if (len(name_types) != 1):
raise TypeError("'limit' contains values that are different types")
# limit value should be numeric if unpacking sarray.array value
if (self.dtype() != dict) and (name_types.pop() != int):
raise TypeError("'limit' must contain integer values.")
if len(set(limit)) != len(limit):
raise ValueError("'limit' contains duplicate values")
if (column_types != None):
if not hasattr(column_types, '__iter__'):
raise TypeError("column_types must be a list");
for column_type in column_types:
if (column_type not in (int, float, str, list, dict, array.array)):
raise TypeError("column_types contains unsupported types. Supported types are ['float', 'int', 'list', 'dict', 'str', 'array.array']")
if limit != None:
if len(limit) != len(column_types):
raise ValueError("limit and column_types do not have the same length")
elif self.dtype() == dict:
raise ValueError("if 'column_types' is given, 'limit' has to be provided to unpack dict type.")
else:
limit = range(len(column_types))
else:
head_rows = self.head(100).dropna()
lengths = [len(i) for i in head_rows]
if len(lengths) == 0 or max(lengths) == 0:
raise RuntimeError("Cannot infer number of items from the SArray, SArray may be empty. please explicitly provide column types")
# infer column types for dict type at server side, for list and array, infer from client side
if self.dtype() != dict:
length = max(lengths)
if limit == None:
limit = range(length)
else:
# adjust the length
length = len(limit)
if self.dtype() == array.array:
column_types = [float for i in range(length)]
else:
column_types = list()
for i in limit:
t = [(x[i] if ((x is not None) and len(x) > i) else None) for x in head_rows]
column_types.append(infer_type_of_list(t))
_mt._get_metric_tracker().track('sarray.unpack')
with cython_context():
if (self.dtype() == dict and column_types == None):
limit = limit if limit != None else []
return gl.SFrame(_proxy=self.__proxy__.unpack_dict(column_name_prefix, limit, na_value))
else:
return gl.SFrame(_proxy=self.__proxy__.unpack(column_name_prefix, limit, column_types, na_value))
def sort(self, ascending=True):
"""
Sort all values in this SArray.
        Sort only works for SArrays of type str, int, float and datetime.datetime;
        otherwise a TypeError is raised. Creates a new, sorted SArray.
Parameters
----------
ascending: boolean, optional
If true, the sarray values are sorted in ascending order, otherwise,
descending order.
Returns
-------
out: SArray
Examples
--------
>>> sa = SArray([3,2,1])
>>> sa.sort()
dtype: int
Rows: 3
[1, 2, 3]
"""
if self.dtype() not in (int, float, str, datetime.datetime):
raise TypeError("Only sarray with type (int, float, str, datetime.datetime) can be sorted")
sf = gl.SFrame()
sf['a'] = self
return sf.sort('a', ascending)['a']
| agpl-3.0 |
raghavrv/scikit-learn | sklearn/ensemble/tests/test_forest.py | 6 | 42990 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.utils.fixes import comb
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.94, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse", "mae"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
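    # The reference quantity computed by mdi_importance below is the
    # closed-form MDI importance from Louppe et al. (2013):
    #   Imp(X_m) = sum_{k=0}^{p-1} 1 / (C(p, k) * (p - k))
    #             * sum_{B subset of V \ {X_m}, |B| = k} I(X_m; Y | B)
    # i.e. an average of the mutual information between X_m and Y conditioned
    # on every possible subset B of the other variables.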
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in np.bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
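    # Why these probabilities: the first random threshold falls in one of the
    # three unit-length gaps of [0, 3] with probability 1/3 each. The middle
    # gap splits the values into {0, 1} and {2, 3}, each of which admits only
    # one further split, so that shape has probability 1/3. Either outer gap
    # isolates a single value and leaves three values whose subtree can be
    # grown in two equally likely ways, giving 1/3 * 1/2 = 1/6 for each of
    # the four remaining shapes.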
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
    # Check that class_weight resembles sample_weight behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Using a Python 2.x list as the sample_weight parameter used to raise
# an exception. This test makes sure such code will now run correctly.
clf = ForestClassifier()
sample_weight = [1.] * len(iris.data)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='balanced', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
    # Test that a warm start fit with an equal n_estimators raises a warning
    # and leaves the forest unchanged.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
    assert_false(hasattr(clf_3, 'oob_score_'))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
def test_min_impurity_split():
# Test if min_impurity_split of base estimators is set
# Regression test for #8006
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [RandomForestClassifier, RandomForestRegressor,
ExtraTreesClassifier, ExtraTreesRegressor]
for Estimator in all_estimators:
est = Estimator(min_impurity_split=0.1)
est = assert_warns_message(DeprecationWarning, "min_impurity_decrease",
est.fit, X, y)
for tree in est.estimators_:
assert_equal(tree.min_impurity_split, 0.1)
def test_min_impurity_decrease():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [RandomForestClassifier, RandomForestRegressor,
ExtraTreesClassifier, ExtraTreesRegressor]
for Estimator in all_estimators:
est = Estimator(min_impurity_decrease=0.1)
est.fit(X, y)
for tree in est.estimators_:
# Simply check if the parameter is passed on correctly. Tree tests
# will suffice for the actual working of this param
assert_equal(tree.min_impurity_decrease, 0.1)
| bsd-3-clause |
lbishal/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 104 | 3778 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])
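# The injected noise has standard deviation 0.5, i.e. a variance of 0.25,
# which is the noise_level a well-calibrated WhiteKernel fit should roughly
# recover in the low-noise optimum.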
# First run
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Second run
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
y_mean + np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
% (kernel, gp.kernel_,
gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Plot LML landscape
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50
plt.contour(Theta0, Theta1, -LML,
levels=np.logspace(np.log10(vmin), np.log10(vmax), 50),
norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/metrics/pairwise.py | 104 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
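# Illustrative helper (not part of the public API): a quick sanity check that
# the dot-product expansion used above agrees with a direct pairwise
# computation. It relies only on numpy, which this module already imports.
def _demo_euclidean_expansion():
    rng = np.random.RandomState(0)
    X = rng.rand(3, 4)
    Y = rng.rand(2, 4)
    diff = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    direct = np.sqrt((diff ** 2).sum(axis=-1))
    assert np.allclose(euclidean_distances(X, Y), direct)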
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
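# Illustrative helper (not part of the public API): the docstring above states
# that this function is mostly equivalent to taking argmin/min over the full
# distance matrix; this checks that claim on a small random input.
def _demo_pairwise_distances_argmin_min():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(5, 3), rng.rand(7, 3)
    argmin, dist = pairwise_distances_argmin_min(X, Y)
    D = pairwise_distances(X, Y)
    assert np.array_equal(argmin, D.argmin(axis=1))
    assert np.allclose(dist, D.min(axis=1))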
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
    X : array-like
        Array containing points, of shape (n_samples1, n_features).
    Y : array-like
        Array containing points, of shape (n_samples2, n_features).
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
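# Illustrative helper (not part of the public API): summing the componentwise
# output (sum_over_features=False) over the feature axis recovers the pairwise
# L1 distances returned by the default call.
def _demo_manhattan_componentwise():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(3, 4), rng.rand(2, 4)
    D = manhattan_distances(X, Y)
    D_comp = manhattan_distances(X, Y, sum_over_features=False)
    back = D_comp.reshape(X.shape[0], Y.shape[0], -1).sum(axis=-1)
    assert np.allclose(D, back)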
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
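# Illustrative helper (not part of the public API): on dense inputs the result
# matches scipy.spatial.distance.cosine applied pairwise, as suggested by the
# "See also" section above.
def _demo_cosine_distances_vs_scipy():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(4, 3), rng.rand(5, 3)
    expected = np.array([[distance.cosine(x, y) for y in Y] for x in X])
    assert np.allclose(cosine_distances(X, Y), expected)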
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
    Notes
    -----
    The cosine distance is equivalent to half the squared euclidean distance
    if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
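# Illustrative helper (not part of the public API): half the squared euclidean
# distance between L2-normalized samples equals 1 - cos(x, y), so the paired
# cosine distances should match an explicit cosine computation.
def _demo_paired_cosine_distances():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(4, 3), rng.rand(4, 3)
    cos = (X * Y).sum(axis=1) / (np.sqrt((X ** 2).sum(axis=1)) *
                                 np.sqrt((Y ** 2).sum(axis=1)))
    assert np.allclose(paired_cosine_distances(X, Y), 1.0 - cos)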
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
    degree : int, default 3
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
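# Illustrative helper (not part of the public API): the kernel above is
# K(X, Y) = (gamma <X, Y> + coef0)^degree; compare against an explicit
# computation with numpy.
def _demo_polynomial_kernel():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(3, 5), rng.rand(4, 5)
    gamma, coef0, degree = 0.2, 1.0, 3
    expected = (gamma * np.dot(X, Y.T) + coef0) ** degree
    assert np.allclose(polynomial_kernel(X, Y, degree=degree, gamma=gamma,
                                         coef0=coef0), expected)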
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
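# Illustrative helper (not part of the public API): K(x, y) = exp(-gamma
# ||x - y||^2), computed explicitly for comparison.
def _demo_rbf_kernel():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(3, 4), rng.rand(2, 4)
    gamma = 0.5
    diff = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    sq_dists = (diff ** 2).sum(axis=-1)
    assert np.allclose(rbf_kernel(X, Y, gamma=gamma), np.exp(-gamma * sq_dists))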
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
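# Illustrative helper (not part of the public API): on L2-normalized inputs
# the cosine similarity reduces to the linear kernel, as stated in the
# docstring above.
def _demo_cosine_similarity_vs_linear():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(5, 3))
    assert np.allclose(cosine_similarity(X, X), linear_kernel(X, X))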
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
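# Illustrative sketch (added here; not part of the original module). It shows
# the relationship between the two chi-squared kernels defined above:
# chi2_kernel is exp(gamma * additive_chi2_kernel). The toy histograms and the
# helper name are invented for this example.
def _example_chi2_kernels():
    import numpy as np
    X = np.array([[0.2, 0.8], [0.5, 0.5]])  # two toy histograms
    Y = np.array([[0.3, 0.7]])
    additive = additive_chi2_kernel(X, Y)   # non-positive values
    assert np.allclose(chi2_kernel(X, Y, gamma=2.), np.exp(2. * additive))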
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
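# Illustrative sketch (added here; not part of the original module). The
# mapping returned by distance_metrics() can be used to resolve a metric name
# to its implementation; the data below is invented for the example.
def _example_distance_metrics_lookup():
    import numpy as np
    X = np.array([[0., 0.], [3., 4.]])
    func = distance_metrics()['euclidean']
    assert np.allclose(func(X), euclidean_distances(X))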
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
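# Illustrative sketch (added here; not part of the original module). The
# negative n_jobs convention handled above resolves to
# max(cpu_count() + 1 + n_jobs, 1), so on a 4-CPU machine n_jobs=-1 means 4
# workers and n_jobs=-2 means 3. The helper below is hypothetical and simply
# mirrors that branch for a fixed, made-up CPU count.
def _example_effective_n_jobs(n_jobs, n_cpus=4):
    if n_jobs < 0:
        n_jobs = max(n_cpus + 1 + n_jobs, 1)
    return n_jobs
# e.g. _example_effective_n_jobs(-1) == 4 and _example_effective_n_jobs(-2) == 3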
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
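# Illustrative sketch (added here; not part of the original module). The "NB"
# comment above exists because ``out.T`` is a view on the same buffer as
# ``out``; an in-place ``out += out.T`` may read entries it has already
# overwritten (older NumPy did not guard against this), whereas
# ``out + out.T`` builds a fresh array. The tiny check below only verifies the
# safe form used in the code.
def _example_symmetrize_safe():
    import numpy as np
    a = np.array([[0., 1.], [2., 0.]])
    assert np.allclose(a + a.T, [[0., 3.], [3., 0.]])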
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
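# Illustrative sketch (added here; not part of the original module). Passing a
# Python callable as ``metric`` routes through _pairwise_callable above; the
# lambda below recomputes plain Euclidean distance, so the result should match
# the built-in metric. Data and helper name are invented for the example.
def _example_callable_metric():
    import numpy as np
    X = np.array([[0., 0.], [3., 4.], [6., 8.]])
    D_builtin = pairwise_distances(X, metric='euclidean')
    D_callable = pairwise_distances(
        X, metric=lambda a, b: np.sqrt(((a - b) ** 2).sum()))
    assert np.allclose(D_builtin, D_callable)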
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
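# Illustrative sketch (added here; not part of the original module). With
# ``filter_params=True`` only the keyword arguments listed in KERNEL_PARAMS
# for the chosen kernel are forwarded, so an unrelated keyword is silently
# dropped. The toy data and helper name are invented for the example.
def _example_filter_params():
    import numpy as np
    X = np.array([[0., 1.], [1., 0.]])
    K1 = pairwise_kernels(X, metric='rbf', gamma=0.1)
    # 'degree' is not listed for 'rbf' in KERNEL_PARAMS, so it is filtered out.
    K2 = pairwise_kernels(X, metric='rbf', filter_params=True,
                          gamma=0.1, degree=3)
    assert np.allclose(K1, K2)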
| bsd-3-clause |
mikegraham/dask | dask/dataframe/tests/test_io.py | 1 | 34901 | import gzip
import pandas as pd
import numpy as np
import pandas.util.testing as tm
import os
import dask
import pytest
from threading import Lock
import shutil
from time import sleep
import threading
import dask.array as da
import dask.dataframe as dd
from dask.dataframe.io import (from_array, from_bcolz, from_dask_array)
from dask.utils import filetext, filetexts, tmpfile, tmpdir
from dask.async import get_sync
from dask.dataframe.utils import eq
########
# CSVS #
########
text = """
name,amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
Alice,200
Frank,-200
Bob,600
Alice,400
Frank,200
Alice,300
Edith,600
""".strip()
def test_read_csv():
with filetext(text) as fn:
f = dd.read_csv(fn, chunkbytes=30, lineterminator='\n')
assert list(f.columns) == ['name', 'amount']
assert f._known_dtype
result = f.compute(get=dask.get)
# index may be different
assert eq(result.reset_index(drop=True),
pd.read_csv(fn, lineterminator='\n'))
def test_read_multiple_csv():
try:
with open('_foo.1.csv', 'w') as f:
f.write(text)
with open('_foo.2.csv', 'w') as f:
f.write(text)
df = dd.read_csv('_foo.*.csv', chunkbytes=30)
assert df._known_dtype
assert df.npartitions > 2
assert (len(dd.read_csv('_foo.*.csv').compute()) ==
len(dd.read_csv('_foo.1.csv').compute()) * 2)
finally:
os.remove('_foo.1.csv')
os.remove('_foo.2.csv')
def normalize_text(s):
return '\n'.join(map(str.strip, s.strip().split('\n')))
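# Illustrative sketch (added here; not part of the original test module).
# normalize_text strips the per-line indentation that triple-quoted CSV
# fixtures in these tests carry; the helper below is hypothetical.
def _example_normalize_text():
    raw = """
    a,b
    1,2
    """
    assert normalize_text(raw) == 'a,b\n1,2'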
def test_consistent_dtypes():
text = normalize_text("""
name,amount
Alice,100.5
Bob,-200.5
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = dd.read_csv(fn, chunkbytes=30)
assert isinstance(df.amount.sum().compute(), float)
assert df._known_dtype
datetime_csv_file = """
name,amount,when
Alice,100,2014-01-01
Bob,200,2014-01-01
Charlie,300,2014-01-01
Dan,400,2014-01-01
""".strip()
def test_read_csv_index():
with filetext(text) as fn:
f = dd.read_csv(fn, chunkbytes=20).set_index('amount')
assert f._known_dtype
result = f.compute(get=get_sync)
assert result.index.name == 'amount'
blocks = dd.DataFrame._get(f.dask, f._keys(), get=get_sync)
for i, block in enumerate(blocks):
if i < len(f.divisions) - 2:
assert (block.index < f.divisions[i + 1]).all()
if i > 0:
assert (block.index >= f.divisions[i]).all()
expected = pd.read_csv(fn).set_index('amount')
assert eq(result, expected)
def test_usecols():
with filetext(datetime_csv_file) as fn:
df = dd.read_csv(fn, chunkbytes=30, usecols=['when', 'amount'])
expected = pd.read_csv(fn, usecols=['when', 'amount'])
assert (df.compute().values == expected.values).all()
####################
# Arrays and BColz #
####################
def test_dummy_from_array():
x = np.array([[1, 2], [3, 4]], dtype=np.int64)
res = dd.io._dummy_from_array(x)
assert isinstance(res, pd.DataFrame)
assert res[0].dtype == np.int64
assert res[1].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index([0, 1]))
x = np.array([[1., 2.], [3., 4.]], dtype=np.float64)
res = dd.io._dummy_from_array(x, columns=['a', 'b'])
assert isinstance(res, pd.DataFrame)
assert res['a'].dtype == np.float64
assert res['b'].dtype == np.float64
tm.assert_index_equal(res.columns, pd.Index(['a', 'b']))
msg = r"""Length mismatch: Expected axis has 2 elements, new values have 3 elements"""
with tm.assertRaisesRegexp(ValueError, msg):
dd.io._dummy_from_array(x, columns=['a', 'b', 'c'])
np.random.seed(42)
x = np.random.rand(201, 2)
x = from_array(x, chunksize=50, columns=['a', 'b'])
assert len(x.divisions) == 6 # Should be 5 partitions and the end
def test_dummy_from_1darray():
x = np.array([1., 2., 3.], dtype=np.float64)
res = dd.io._dummy_from_array(x)
assert isinstance(res, pd.Series)
assert res.dtype == np.float64
x = np.array([1, 2, 3], dtype=np.object_)
res = dd.io._dummy_from_array(x, columns='x')
assert isinstance(res, pd.Series)
assert res.name == 'x'
assert res.dtype == np.object_
x = np.array([1, 2, 3], dtype=np.object_)
res = dd.io._dummy_from_array(x, columns=['x'])
assert isinstance(res, pd.DataFrame)
assert res['x'].dtype == np.object_
tm.assert_index_equal(res.columns, pd.Index(['x']))
msg = r"""Length mismatch: Expected axis has 1 elements, new values have 2 elements"""
with tm.assertRaisesRegexp(ValueError, msg):
dd.io._dummy_from_array(x, columns=['a', 'b'])
def test_dummy_from_recarray():
x = np.array([(i, i*10) for i in range(10)],
dtype=[('a', np.float64), ('b', np.int64)])
res = dd.io._dummy_from_array(x)
assert isinstance(res, pd.DataFrame)
assert res['a'].dtype == np.float64
assert res['b'].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(['a', 'b']))
res = dd.io._dummy_from_array(x, columns=['x', 'y'])
assert isinstance(res, pd.DataFrame)
assert res['x'].dtype == np.float64
assert res['y'].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(['x', 'y']))
msg = r"""Length mismatch: Expected axis has 2 elements, new values have 3 elements"""
with tm.assertRaisesRegexp(ValueError, msg):
dd.io._dummy_from_array(x, columns=['a', 'b', 'c'])
def test_from_array():
x = np.arange(10 * 3).reshape(10, 3)
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
assert d._known_dtype
tm.assert_index_equal(d.columns, pd.Index([0, 1, 2]))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
d = dd.from_array(x, chunksize=4, columns=list('abc'))
assert isinstance(d, dd.DataFrame)
assert d._known_dtype
tm.assert_index_equal(d.columns, pd.Index(['a', 'b', 'c']))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
with pytest.raises(ValueError):
dd.from_array(np.ones(shape=(10, 10, 10)))
def test_from_array_with_record_dtype():
x = np.array([(i, i*10) for i in range(10)],
dtype=[('a', 'i4'), ('b', 'i4')])
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
assert d._known_dtype
assert list(d.columns) == ['a', 'b']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().to_records(index=False) == x).all()
def test_from_bcolz_multiple_threads():
bcolz = pytest.importorskip('bcolz')
def check():
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert str(d.dtypes['a']) == 'category'
assert list(d.x.compute(get=get_sync)) == [1, 2, 3]
assert list(d.a.compute(get=get_sync)) == ['a', 'b', 'a']
d = dd.from_bcolz(t, chunksize=2, index='x')
L = list(d.index.compute(get=get_sync))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) ==
sorted(dd.from_bcolz(t, chunksize=2).dask))
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) !=
sorted(dd.from_bcolz(t, chunksize=3).dask))
threads = []
for i in range(5):
thread = threading.Thread(target=check)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def test_from_bcolz():
bcolz = pytest.importorskip('bcolz')
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
d = dd.from_bcolz(t, chunksize=2)
assert d._known_dtype
assert d.npartitions == 2
assert str(d.dtypes['a']) == 'category'
assert list(d.x.compute(get=get_sync)) == [1, 2, 3]
assert list(d.a.compute(get=get_sync)) == ['a', 'b', 'a']
L = list(d.index.compute(get=get_sync))
assert L == [0, 1, 2]
d = dd.from_bcolz(t, chunksize=2, index='x')
L = list(d.index.compute(get=get_sync))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) ==
sorted(dd.from_bcolz(t, chunksize=2).dask))
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) !=
sorted(dd.from_bcolz(t, chunksize=3).dask))
dsk = dd.from_bcolz(t, chunksize=3).dask
t.append((4, 4., 'b'))
t.flush()
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) !=
sorted(dsk))
def test_from_bcolz_no_lock():
bcolz = pytest.importorskip('bcolz')
locktype = type(Lock())
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'], chunklen=2)
a = dd.from_bcolz(t, chunksize=2)
b = dd.from_bcolz(t, chunksize=2, lock=True)
c = dd.from_bcolz(t, chunksize=2, lock=False)
eq(a, b)
eq(a, c)
assert not any(isinstance(item, locktype)
for v in c.dask.values()
for item in v)
def test_from_bcolz_filename():
bcolz = pytest.importorskip('bcolz')
with tmpfile('.bcolz') as fn:
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'],
rootdir=fn)
t.flush()
d = dd.from_bcolz(fn, chunksize=2)
assert list(d.x.compute()) == [1, 2, 3]
def test_from_bcolz_column_order():
bcolz = pytest.importorskip('bcolz')
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
df = dd.from_bcolz(t, chunksize=2)
assert list(df.loc[0].compute().columns) == ['x', 'y', 'a']
def test_skipinitialspace():
text = normalize_text("""
name, amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = dd.read_csv(fn, skipinitialspace=True, chunkbytes=20)
assert 'amount' in df.columns
assert df.amount.max().compute() == 600
def test_consistent_dtypes_2():
text1 = normalize_text("""
name,amount
Alice,100
Bob,-200
Charlie,300
""")
text2 = normalize_text("""
name,amount
1,400
2,-500
Frank,600
""")
try:
with open('_foo.1.csv', 'w') as f:
f.write(text1)
with open('_foo.2.csv', 'w') as f:
f.write(text2)
df = dd.read_csv('_foo.*.csv', chunkbytes=25)
assert df.amount.max().compute() == 600
finally:
pass
os.remove('_foo.1.csv')
os.remove('_foo.2.csv')
@pytest.mark.slow
def test_compression_multiple_files():
with tmpdir() as tdir:
f = gzip.open(os.path.join(tdir, 'a.csv.gz'), 'wb')
f.write(text.encode())
f.close()
f = gzip.open(os.path.join(tdir, 'b.csv.gz'), 'wb')
f.write(text.encode())
f.close()
df = dd.read_csv(os.path.join(tdir, '*.csv.gz'), compression='gzip')
assert len(df.compute()) == (len(text.split('\n')) - 1) * 2
def test_empty_csv_file():
with filetext('a,b') as fn:
df = dd.read_csv(fn, header=0)
assert len(df.compute()) == 0
assert list(df.columns) == ['a', 'b']
def test_from_pandas_dataframe():
a = list('aaaaaaabbbbbbbbccccccc')
df = pd.DataFrame(dict(a=a, b=np.random.randn(len(a))),
index=pd.date_range(start='20120101', periods=len(a)))
ddf = dd.from_pandas(df, 3)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert type(ddf.divisions[0]) == type(df.index[0])
tm.assert_frame_equal(df, ddf.compute())
ddf = dd.from_pandas(df, chunksize=8)
msg = 'Exactly one of npartitions and chunksize must be specified.'
with tm.assertRaisesRegexp(ValueError, msg):
dd.from_pandas(df, npartitions=2, chunksize=2)
with tm.assertRaisesRegexp(ValueError, msg):
dd.from_pandas(df)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert type(ddf.divisions[0]) == type(df.index[0])
tm.assert_frame_equal(df, ddf.compute())
def test_from_pandas_small():
df = pd.DataFrame({'x': [1, 2, 3]})
for i in [1, 2, 30]:
a = dd.from_pandas(df, i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
a = dd.from_pandas(df, chunksize=i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
@pytest.mark.xfail(reason="")
def test_from_pandas_npartitions_is_accurate():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
for n in [1, 2, 4, 5]:
assert dd.from_pandas(df, npartitions=n).npartitions == n
def test_from_pandas_series():
n = 20
s = pd.Series(np.random.randn(n),
index=pd.date_range(start='20120101', periods=n))
ds = dd.from_pandas(s, 3)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert type(ds.divisions[0]) == type(s.index[0])
tm.assert_series_equal(s, ds.compute())
ds = dd.from_pandas(s, chunksize=8)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert type(ds.divisions[0]) == type(s.index[0])
tm.assert_series_equal(s, ds.compute())
def test_from_pandas_non_sorted():
df = pd.DataFrame({'x': [1, 2, 3]}, index=[3, 1, 2])
ddf = dd.from_pandas(df, npartitions=2, sort=False)
assert not ddf.known_divisions
eq(df, ddf)
ddf = dd.from_pandas(df, chunksize=2, sort=False)
assert not ddf.known_divisions
eq(df, ddf)
def test_from_pandas_single_row():
df = pd.DataFrame({'x': [1]}, index=[1])
ddf = dd.from_pandas(df, npartitions=1)
assert ddf.divisions == (1, 1)
assert eq(ddf, df)
def test_DataFrame_from_dask_array():
x = da.ones((10, 3), chunks=(4, 2))
df = from_dask_array(x, ['a', 'b', 'c'])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df.columns, pd.Index(['a', 'b', 'c']))
assert list(df.divisions) == [0, 4, 8, 9]
assert (df.compute(get=get_sync).values == x.compute(get=get_sync)).all()
# dd.from_array should re-route to from_dask_array
df2 = dd.from_array(x, columns=['a', 'b', 'c'])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df2.columns, df.columns)
assert df2.divisions == df.divisions
def test_Series_from_dask_array():
x = da.ones(10, chunks=4)
ser = from_dask_array(x, 'a')
assert isinstance(ser, dd.Series)
assert ser.name == 'a'
assert list(ser.divisions) == [0, 4, 8, 9]
assert (ser.compute(get=get_sync).values == x.compute(get=get_sync)).all()
ser = from_dask_array(x)
assert isinstance(ser, dd.Series)
assert ser.name is None
# dd.from_array should re-route to from_dask_array
ser2 = dd.from_array(x)
assert isinstance(ser2, dd.Series)
assert eq(ser, ser2)
def test_from_dask_array_compat_numpy_array():
x = da.ones((3, 3, 3), chunks=2)
msg = r"from_array does not input more than 2D array, got array with shape \(3, 3, 3\)"
with tm.assertRaisesRegexp(ValueError, msg):
from_dask_array(x) # dask
with tm.assertRaisesRegexp(ValueError, msg):
from_array(x.compute()) # numpy
x = da.ones((10, 3), chunks=(3, 3))
d1 = from_dask_array(x) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index([0, 1, 2]))
d2 = from_array(x.compute()) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index([0, 1, 2]))
msg = r"""Length mismatch: Expected axis has 3 elements, new values have 1 elements"""
with tm.assertRaisesRegexp(ValueError, msg):
from_dask_array(x, columns=['a']) # dask
with tm.assertRaisesRegexp(ValueError, msg):
from_array(x.compute(), columns=['a']) # numpy
d1 = from_dask_array(x, columns=['a', 'b', 'c']) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(['a', 'b', 'c']))
d2 = from_array(x.compute(), columns=['a', 'b', 'c']) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(['a', 'b', 'c']))
def test_from_dask_array_compat_numpy_array_1d():
x = da.ones(10, chunks=3)
d1 = from_dask_array(x) # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name is None
d2 = from_array(x.compute()) # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name is None
d1 = from_dask_array(x, columns='name') # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name == 'name'
d2 = from_array(x.compute(), columns='name') # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name == 'name'
# passing list via columns results in DataFrame
d1 = from_dask_array(x, columns=['name']) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(['name']))
d2 = from_array(x.compute(), columns=['name']) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(['name']))
def test_from_dask_array_struct_dtype():
x = np.array([(1, 'a'), (2, 'b')], dtype=[('a', 'i4'), ('b', 'object')])
y = da.from_array(x, chunks=(1,))
df = dd.from_dask_array(y)
tm.assert_index_equal(df.columns, pd.Index(['a', 'b']))
assert eq(df, pd.DataFrame(x))
assert eq(dd.from_dask_array(y, columns=['b', 'a']),
pd.DataFrame(x, columns=['b', 'a']))
@pytest.mark.xfail(reason="bloscpack BLOSC_MAX_BUFFERSIZE")
def test_to_castra():
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
b = c.to_dask()
try:
tm.assert_frame_equal(df, c[:])
tm.assert_frame_equal(b.compute(), df)
finally:
c.drop()
c = a.to_castra(categories=['x'])
try:
assert c[:].dtypes['x'] == 'category'
finally:
c.drop()
c = a.to_castra(sorted_index_column='y')
try:
tm.assert_frame_equal(c[:], df.set_index('y'))
finally:
c.drop()
dsk, keys = a.to_castra(compute=False)
assert isinstance(dsk, dict)
assert isinstance(keys, list)
c, last = keys
assert last[1] == a.npartitions - 1
@pytest.mark.xfail(reason="bloscpack BLOSC_MAX_BUFFERSIZE")
def test_from_castra():
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
with_castra = dd.from_castra(c)
with_fn = dd.from_castra(c.path)
with_columns = dd.from_castra(c, 'x')
try:
tm.assert_frame_equal(df, with_castra.compute())
tm.assert_frame_equal(df, with_fn.compute())
tm.assert_series_equal(df.x, with_columns.compute())
finally:
# Calling c.drop() is a race condition on drop from `with_fn.__del__`
# and c.drop. Manually `del`ing gets around this.
del with_fn, c
@pytest.mark.xfail(reason="bloscpack BLOSC_MAX_BUFFERSIZE")
def test_from_castra_with_selection():
""" Optimizations fuse getitems with load_partitions
We used to use getitem for both column access and selections
"""
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
b = dd.from_castra(a.to_castra())
assert eq(b[b.y > 3].x, df[df.y > 3].x)
def test_to_hdf():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out[:])
with tmpfile('h5') as fn:
a.x.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_series_equal(df.x, out[:])
a = dd.from_pandas(df, 1)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out[:])
# saving to multiple datasets
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data*')
out = dd.read_hdf(fn, '/data*')
tm.assert_frame_equal(df, out.compute())
# saving to multiple files
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
fn = os.path.join(dn, 'data_*.h5')
a.to_hdf(fn, '/data')
out = dd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out.compute())
# saving to multiple datasets with custom name_function
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data_*', name_function=lambda i: 'a' * (i + 1))
out = dd.read_hdf(fn, '/data_*')
tm.assert_frame_equal(df, out.compute())
out = pd.read_hdf(fn, '/data_a')
tm.assert_frame_equal(out, df.iloc[:2])
out = pd.read_hdf(fn, '/data_aa')
tm.assert_frame_equal(out, df.iloc[2:])
# saving to multiple files with custom name_function
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
fn = os.path.join(dn, 'data_*.h5')
a.to_hdf(fn, '/data', name_function=lambda i: 'a' * (i + 1))
out = dd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out.compute())
out = pd.read_hdf(os.path.join(dn, 'data_a.h5'), '/data')
tm.assert_frame_equal(out, df.iloc[:2])
out = pd.read_hdf(os.path.join(dn, 'data_aa.h5'), '/data')
tm.assert_frame_equal(out, df.iloc[2:])
# saving to different datasets in multiple files with custom name_function
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
with pytest.raises(ValueError):
fn = os.path.join(dn, 'data_*.h5')
a.to_hdf(fn, '/data_*', name_function=lambda i: 'a' * (i + 1))
def test_read_hdf():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
with tmpfile('h5') as fn:
df.to_hdf(fn, '/data')
try:
dd.read_hdf(fn, 'data', chunksize=2)
assert False
except TypeError as e:
assert "format='table'" in str(e)
with tmpfile('h5') as fn:
df.to_hdf(fn, '/data', format='table')
a = dd.read_hdf(fn, '/data', chunksize=2)
assert a.npartitions == 2
assert a._known_dtype
tm.assert_frame_equal(a.compute(), df)
tm.assert_frame_equal(
dd.read_hdf(fn, '/data', chunksize=2, start=1, stop=3).compute(),
pd.read_hdf(fn, '/data', start=1, stop=3))
assert (sorted(dd.read_hdf(fn, '/data').dask) ==
sorted(dd.read_hdf(fn, '/data').dask))
def test_to_csv():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn)
result = pd.read_csv(fn, index_col=0)
tm.assert_frame_equal(result, df)
@pytest.mark.xfail(reason="bloscpack BLOSC_MAX_BUFFERSIZE")
def test_to_csv_gzip():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn, compression='gzip')
result = pd.read_csv(fn, index_col=0, compression='gzip')
tm.assert_frame_equal(result, df)
def test_to_csv_series():
s = pd.Series([1, 2, 3], index=[10, 20, 30], name='foo')
a = dd.from_pandas(s, 2)
with tmpfile('csv') as fn:
with tmpfile('csv') as fn2:
a.to_csv(fn)
s.to_csv(fn2)
with open(fn) as f:
adata = f.read()
with open(fn2) as f:
sdata = f.read()
assert adata == sdata
def test_read_csv_with_nrows():
with filetext(text) as fn:
f = dd.read_csv(fn, nrows=3)
assert list(f.columns) == ['name', 'amount']
assert f.npartitions == 1
assert eq(dd.read_csv(fn, nrows=3), pd.read_csv(fn, nrows=3))
def test_read_csv_raises_on_no_files():
fn = '.not.a.real.file.csv'
try:
dd.read_csv(fn)
assert False
except IOError as e:
assert fn in str(e)
def test_read_csv_has_deterministic_name():
with filetext(text) as fn:
a = dd.read_csv(fn)
b = dd.read_csv(fn)
assert a._name == b._name
assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)
assert isinstance(a._name, str)
c = dd.read_csv(fn, skiprows=1, na_values=[0])
assert a._name != c._name
def test_multiple_read_csv_has_deterministic_name():
with filetexts({'_foo.1.csv': text, '_foo.2.csv': text}):
a = dd.read_csv('_foo.*.csv')
b = dd.read_csv('_foo.*.csv')
assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)
def test_csv_with_integer_names():
with filetext('alice,1\nbob,2') as fn:
df = dd.read_csv(fn, header=None)
assert list(df.columns) == [0, 1]
@pytest.mark.slow
def test_read_csv_of_modified_file_has_different_name():
with filetext(text) as fn:
sleep(1)
a = dd.read_csv(fn)
sleep(1)
with open(fn, 'a') as f:
f.write('\nGeorge,700')
os.fsync(f)
b = dd.read_csv(fn)
assert sorted(a.dask) != sorted(b.dask)
def test_to_bag():
pytest.importorskip('dask.bag')
a = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
ddf = dd.from_pandas(a, 2)
assert ddf.to_bag().compute(get=get_sync) == list(a.itertuples(False))
assert ddf.to_bag(True).compute(get=get_sync) == list(a.itertuples(True))
assert ddf.x.to_bag(True).compute(get=get_sync) == list(a.x.iteritems())
assert ddf.x.to_bag().compute(get=get_sync) == list(a.x)
@pytest.mark.xfail(reason='we might want permissive behavior here')
def test_report_dtype_correction_on_csvs():
text = 'numbers,names\n'
for i in range(1000):
text += '1,foo\n'
text += '1.5,bar\n'
with filetext(text) as fn:
with pytest.raises(ValueError) as e:
dd.read_csv(fn).compute(get=get_sync)
assert "'numbers': 'float64'" in str(e)
def test_hdf_globbing():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
with tmpdir() as tdir:
df.to_hdf(os.path.join(tdir, 'one.h5'), '/foo/data', format='table')
df.to_hdf(os.path.join(tdir, 'two.h5'), '/bar/data', format='table')
df.to_hdf(os.path.join(tdir, 'two.h5'), '/foo/data', format='table')
with dask.set_options(get=dask.get):
res = dd.read_hdf(os.path.join(tdir, 'one.h5'), '/*/data',
chunksize=2)
assert res.npartitions == 2
tm.assert_frame_equal(res.compute(), df)
res = dd.read_hdf(os.path.join(tdir, 'one.h5'), '/*/data',
chunksize=2, start=1, stop=3)
expected = pd.read_hdf(os.path.join(tdir, 'one.h5'), '/foo/data',
start=1, stop=3)
tm.assert_frame_equal(res.compute(), expected)
res = dd.read_hdf(os.path.join(tdir, 'two.h5'), '/*/data', chunksize=2)
assert res.npartitions == 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))
res = dd.read_hdf(os.path.join(tdir, '*.h5'), '/foo/data', chunksize=2)
assert res.npartitions == 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))
res = dd.read_hdf(os.path.join(tdir, '*.h5'), '/*/data', chunksize=2)
assert res.npartitions == 2 + 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 3))
def test_index_col():
with filetext(text) as fn:
try:
f = dd.read_csv(fn, chunkbytes=30, index_col='name')
assert False
except ValueError as e:
assert 'set_index' in str(e)
timeseries = """
Date,Open,High,Low,Close,Volume,Adj Close
2015-08-28,198.50,199.839996,197.919998,199.240005,143298900,199.240005
2015-08-27,197.020004,199.419998,195.210007,199.160004,266244700,199.160004
2015-08-26,192.080002,194.789993,188.369995,194.679993,328058100,194.679993
2015-08-25,195.429993,195.449997,186.919998,187.229996,353966700,187.229996
2015-08-24,197.630005,197.630005,182.399994,189.550003,478672400,189.550003
2015-08-21,201.729996,203.940002,197.520004,197.630005,328271500,197.630005
2015-08-20,206.509995,208.289993,203.899994,204.009995,185865600,204.009995
2015-08-19,209.089996,210.009995,207.350006,208.279999,167316300,208.279999
2015-08-18,210.259995,210.679993,209.699997,209.929993,70043800,209.929993
""".strip()
def test_read_csv_with_datetime_index_partitions_one():
with filetext(timeseries) as fn:
df = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
parse_dates=['Date'])
# chunkbytes set to explicitly set to single chunk
ddf = dd.read_csv(fn, header=0, usecols=[0, 4],
parse_dates=['Date'],
chunkbytes=10000000).set_index('Date')
eq(df, ddf)
# because fn is so small, by default, this will only be one chunk
ddf = dd.read_csv(fn, header=0, usecols=[0, 4],
parse_dates=['Date']).set_index('Date')
eq(df, ddf)
def test_read_csv_with_datetime_index_partitions_n():
with filetext(timeseries) as fn:
df = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
parse_dates=['Date'])
# because fn is so small, by default, set chunksize small
ddf = dd.read_csv(fn, header=0, usecols=[0, 4],
parse_dates=['Date'],
chunkbytes=400).set_index('Date')
eq(df, ddf)
def test_from_pandas_with_datetime_index():
with filetext(timeseries) as fn:
df = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
parse_dates=['Date'])
ddf = dd.from_pandas(df, 2)
eq(df, ddf)
ddf = dd.from_pandas(df, chunksize=2)
eq(df, ddf)
@pytest.mark.parametrize('encoding', ['utf-16', 'utf-16-le', 'utf-16-be'])
def test_encoding_gh601(encoding):
ar = pd.Series(range(0, 100))
br = ar % 7
cr = br * 3.3
dr = br / 1.9836
test_df = pd.DataFrame({'a': ar, 'b': br, 'c': cr, 'd': dr})
with tmpfile('.csv') as fn:
test_df.to_csv(fn, encoding=encoding, index=False)
a = pd.read_csv(fn, encoding=encoding)
d = dd.read_csv(fn, encoding=encoding, chunkbytes=1000)
d = d.compute()
d.index = range(len(d.index))
assert eq(d, a)
def test_read_hdf_doesnt_segfault():
pytest.importorskip('tables')
with tmpfile('h5') as fn:
N = 40
df = pd.DataFrame(np.random.randn(N, 3))
with pd.HDFStore(fn, mode='w') as store:
store.append('/x', df)
ddf = dd.read_hdf(fn, '/x', chunksize=2)
assert len(ddf) == N
def test_read_csv_header_issue_823():
text = '''a b c-d\n1 2 3\n4 5 6'''.replace(' ', '\t')
with filetext(text) as fn:
df = dd.read_csv(fn, sep='\t')
eq(df, pd.read_csv(fn, sep='\t'))
df = dd.read_csv(fn, delimiter='\t')
eq(df, pd.read_csv(fn, delimiter='\t'))
def test_none_usecols():
with filetext(text) as fn:
df = dd.read_csv(fn, usecols=None)
eq(df, pd.read_csv(fn, usecols=None))
pdmc_text = """
ID,date,time
10,2003-11-04,180036
11,2003-11-05,125640
12,2003-11-01,2519
13,2003-10-22,142559
14,2003-10-24,163113
15,2003-10-20,170133
16,2003-11-11,160448
17,2003-11-03,171759
18,2003-11-07,190928
19,2003-10-21,84623
20,2003-10-25,192207
21,2003-11-13,180156
22,2003-11-15,131037
""".strip()
def test_parse_dates_multi_column():
with filetext(pdmc_text) as fn:
ddf = dd.read_csv(fn, parse_dates=[['date', 'time']])
df = pd.read_csv(fn, parse_dates=[['date', 'time']])
assert (df.columns == ddf.columns).all()
assert len(df) == len(ddf)
sep_text = """
name###amount
alice###100
bob###200
charlie###300"""
def test_read_csv_sep():
with filetext(sep_text) as fn:
ddf = dd.read_csv(fn, sep="###")
df = pd.read_csv(fn, sep="###")
assert (df.columns == ddf.columns).all()
assert len(df) == len(ddf)
def test_to_hdf_kwargs():
df = pd.DataFrame({'A': ['a', 'aaaa']})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_hdf('tst.h5', 'foo4', format='table', min_itemsize=4)
df2 = pd.read_hdf('tst.h5', 'foo4')
tm.assert_frame_equal(df, df2)
def test_read_csv_slash_r():
data = b'0,my\n1,data\n' * 1000 + b'2,foo\rbar'
with filetext(data, mode='wb') as fn:
dd.read_csv(fn, header=None, sep=',', lineterminator='\n',
names=['a','b'], blocksize=200).compute(get=dask.get)
def test_read_csv_singleton_dtype():
data = b'a,b\n1,2\n3,4\n5,6'
with filetext(data, mode='wb') as fn:
eq(pd.read_csv(fn, dtype=float),
dd.read_csv(fn, dtype=float))
| bsd-3-clause |
jarryliu/queue-sim | plot/draw.py | 1 | 15784 | #!/usr/local/bin/python
from math import factorial, exp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d, Axes3D #<-- Note the capitalization!
import sys
intList = [5000, 1000, 500, 100, 50, 10]
trate = [1000000.0/i for i in intList]
cpuList = [1.0, 4.2, 8.0, 26.9, 38.7, 52]
rate = [0.314, 1.45, 2.72, 9.42, 13.6, 21.1]
rate = [r*1000 for r in rate]
latency = [8.1924e+03, 7.9385e+03, 7.8343e+03, 8.1685e+03, 8.6729e+03, 8.6729e+03 ]
latency = [l/1000000.0 for l in latency]
#plt.plot(trate, cpuList, 'r-.')
plt.figure(1)
plt.subplot(211)
plt.plot(rate, cpuList, 'bo-')
plt.ylabel("CPU utilization (%)")
plt.xlabel("Message rate (Kbps)")
plt.subplot(212)
plt.plot(rate, latency, 'ro-')
plt.ylim(0, 0.01)
plt.ylabel("Latency (ms)")
plt.xlabel("Message rate (Kbps)")
plt.show()
sys.exit()
#from mpl_toolkits.mplot3d import Axes3D
#from theory import getDelay, getLatency, totalDelay
# bucket = np.arange(1,21)
# bresult= [0.00409438016142, 0.0033155469912, 0.00267805247694, 0.00217196080862, 0.00179592654568,
# 0.00143718393687, 0.00116060379269, 0.000978849410248, 0.000755804749056, 0.000629652721451,
# 0.000509918882204, 0.000438399316067, 0.000338310877662, 0.000280665269416, 0.000244070153101,
# 0.000172161374231, 0.000149499687789, 0.000121459034788, 9.30199199637e-05, 7.75854592678e-05]
#
# dlist = []
# for i in bucket:
# dlist.append(getDelay(0.9,i))
# plt.plot(bucket, dlist, "-")
# plt.plot(bucket, np.array(bresult)*1000, 'o')
#
#
# legendList = ['theory', 'simulation']
# plt.legend(legendList, loc='upper right')
# plt.xlabel('bucket size')
# plt.ylabel('average latency (ms)')
# plt.show()
#
#
# rate = range(1000, 0, -100)
# rresult = [0.000644522106328, 0.000720025905961, 0.000833121678584, 0.000895596093789, 0.00101505313479, 0.00128537828299, 0.0015555967225, 0.00209048499208, 0.00313702591988, 0.00616596723663]
#
# d = getDelay(0.9,10)
# dlist = [d/(0.1*(10-i)) for i in xrange(10)]
# plt.plot(rate, dlist, "-")
# plt.plot(rate, np.array(rresult)*1000, 'o')
#
#
# legendList = ['theory', 'simulation']
# plt.legend(legendList, loc='upper right')
# plt.xlabel('bucket rate')
# plt.ylabel('average latency (ms)')
# plt.show()
# interval = [0.1, 0.2, 0.5, 1, 2, 5, 10, 20]
# possion_result = [0.00131450177183, 0.0015070228446, 0.0016599388821, 0.00161004213216, 0.0015961046498, 0.00146764593642, 0.00144323696861, 0.00140161336144]
# b_result = np.array([0.000590173923748, 0.00223829675234, 0.00349507988276, 0.00554642015014,
# 0.00793513324288, 0.0117633777557, 0.0131939118183, 0.0152916625152,
# 0.0164328270268, 0.0222740491034, 0.0260078343715, 0.026809945385])*1000
#
# s_result = np.array([0.00945765245304, 0.00211677915805, 0.00153174938914, 0.00129779523745,
# 0.00117139743497, 0.00108493653043, 0.00106551896397, 0.00105197218411,
# 0.00104446798347, 0.00100978968546, 0.00100655731514, 0.00100732780158])*1000
# b_result = np.array([0.000556862018053, 0.00226279268004, 0.00373865173411, 0.00554710361537,
# 0.00823055300791, 0.0117136387434, 0.0128881523441, 0.0166177605538,
# 0.016524255912, 0.0221778073856, 0.0257723768586, 0.0267681413876])*1000
#
# s_result = np.array([0.0092905418664, 0.0021032834536, 0.00152273155381, 0.00129437599152,
# 0.00116818969581, 0.00108350271543, 0.00106527594669, 0.00105236611835,
# 0.0010370405632086788, 0.00101056378729, 0.00100803562565, 0.00100450341295])*1000
######### best result
# b_result = np.array([0, 0, 0, 1.24239805608e-06,
# 1.34584248141e-05, 4.84002550078e-05, 0.000117872470448, 0.000214928715841,
# 0.000351449322535, 0.000594727983716, 0.000975557026088, 0.00151676371671])*1000
#
# s_result = np.array([0.00980780382356, 0.00251265470871, 0.00181477766449, 0.00156341771023,
# 0.00142817810789, 0.00134093139615, 0.00128743022846, 0.00124448951586,
# 0.00121615276775, 0.00118856757796, 0.00116722571315, 0.00115158808519])*1000
# rate = 2000
# bucketSize = 200
# w_result = b_result + s_result
#
# x = range(2,14)
# b_theory = np.array([getLatency(rate/i, 0.9, bucketSize/i) for i in x])
# s_theory = np.array([1.0/(1000 - 1800.0/i) for i in x])*1000
# print b_theory
# print s_theory
# plt.plot(x, b_result, '*')
# plt.plot(x,b_theory)
# plt.plot(x, s_result, '.')
# plt.plot(x, s_theory)
# plt.plot(x, w_result, 'o')
# plt.plot(x, b_theory + s_theory)
#
# legendList = ['token_bucket_sim', 'token_bucket_theory', 'server_sim', 'server_theory', 'latency_sim', 'latency_theory']
# plt.legend(legendList, loc='upper right')
# plt.xlabel('number of servers')
# plt.ylabel('average latency (ms)')
# plt.show()
######### draw theory
# b_result = np.array([0, 0, 0, 1.24239805608e-06,
# 1.34584248141e-05, 4.84002550078e-05, 0.000117872470448, 0.000214928715841,
# 0.000351449322535, 0.000594727983716, 0.000975557026088, 0.00151676371671])*1000
#
# s_result = np.array([0.00980780382356, 0.00251265470871, 0.00181477766449, 0.00156341771023,
# 0.00142817810789, 0.00134093139615, 0.00128743022846, 0.00124448951586,
# 0.00121615276775, 0.00118856757796, 0.00116722571315, 0.00115158808519])*1000
#
# util = 0.9
# rate = 2000
# prate = 1000
# bucketSize = 200
# start = 2
# x = range(start,len(b_result)+2)
#
# b_theory = []
# s_theory = []
# for i in x:
# b, s, t = totalDelay(rate, bucketSize, rate*util, prate, i)
# b_theory.append(b)
# s_theory.append(s)
# print b, s, t
# # b_theory = np.array([getLatency(rate/i, util, bucketSize/i) for i in x])
# # s_theory = np.array([1/(prate - start*prate*0.9*1.0/i) for i in x])
# #
#
# w_result = b_result + s_result
# plt.plot(x, b_result, '*')
# plt.plot(x,np.array(b_theory)*1000)
# plt.plot(x, s_result, '.')
# plt.plot(x, np.array(s_theory)*1000)
# plt.plot(x, w_result, 'o')
# plt.plot(x, np.array(b_theory)*1000 + np.array(s_theory)*1000)
#
# legendList = ['token_bucket_sim', 'token_bucket_theory', 'server_sim', 'server_theory', 'latency_sim', 'latency_theory']
# plt.legend(legendList, loc='upper right')
# plt.xlabel('number of servers')
# plt.ylabel('average latency (ms)')
# plt.show()
######### drop load increase
# r = 500
# b = 20
# mu = 500
# opt_n = 1
# x = []
# nList = []
# bList = []
# sList = []
# tList = []
#
# for i in xrange(49):
# lam = (i+1)*10
# x.append(lam)
# for j in xrange(4):
# tb, ts, t = totalDelay(r, b, lam, mu, j+1)
# if len(bList) < j+1:
# bList.append([])
# sList.append([])
# tList.append([])
# bList[j].append(tb)
# sList[j].append(ts)
# tList[j].append(t)
#
# print bList
# print sList
# print tList
# print nList
# #plt.plot(x, b_result, '*')
# plt.plot(x,np.array(tList[0])*1000)
# #plt.plot(x, s_result, '.')
# plt.plot(x, np.array(tList[1])*1000)
# #plt.plot(x, w_result, 'o')
# plt.plot(x, np.array(tList[2])*1000)
# plt.plot(x, np.array(tList[3])*1000)
#
# legendList = ['1 server', '2 servers', '3 servers', '4 servers']
# plt.legend(legendList, loc='upper left')
# plt.xlabel('arrival rate')
# plt.ylabel('average latency (ms)')
# plt.ylim(0, 15)
# plt.show()
############################
# lam = 50.0
# r = 500
# b = [2 , 4, 8, 16, 32, 128]
# bLegend = ["token bucket b="+str(i) for i in b]
# #mu = 500
# opt_n = 1
# x = []
# bList = []
# sList = []
#
# for i in xrange(100):
# mu = lam + (i+1)*0.5
# x.append(lam/mu)
# for j in xrange(len(b)):
# tb, ts, t = totalDelay(mu, b[j], lam, mu, 1)
# if len(bList) < j+1:
# bList.append([])
# bList[j].append(tb*1000)
# sList.append(lam/mu/(mu - lam)*1000)
#
# plt.plot(x,sList)
# for j in xrange(len(b)):
# plt.plot(x, bList[j])
# legendList = ["queuing time"] + bLegend
# plt.legend(legendList, loc='upper left')
# plt.xlabel('utilization')
# plt.ylabel('average latency (ms)')
# plt.ylim(0, 400)
# plt.show()
### increase server
# lam = 500.0
# r = 500
# b = [2 , 4, 8, 16, 32, 64]
# bLegend = ["token bucket b="+str(i) for i in b]
# #mu = 500
# opt_n = 1
# x = []
# bList = []
# sList = []
# ratioA = 3
# ratioB = 4
# ratio = ratioA*1.0/ratioB
#
# for i in xrange(100):
# mu = lam + (i+1)*5.0
# x.append(lam/mu)
# for j in xrange(len(b)):
# tb1, ts1, t1 = totalDelay(mu*ratioA, b[j]*ratioA, lam*ratioA, mu, ratioA)
# tb2, ts2, t2 = totalDelay(mu*ratioA, b[j]*ratioA, lam*ratioA, mu, ratioB)
# if len(bList) < j+1:
# bList.append([])
# bList[j].append((tb2-tb1)*1000)
# print (tb2-tb1)*1000
# sList.append((lam/mu/(mu - lam) - ratio*lam/mu/(mu - lam*ratio))*1000)
#
# print x
# plt.plot(x,sList)
# for j in xrange(len(b)):
# plt.plot(x, bList[j])
# legendList = ["queuing time"] + bLegend
# plt.legend(legendList, loc='upper left')
# plt.xlabel('utilization')
# plt.ylabel('change in latency (ms) when increase a server')
# plt.ylim(0, 40)
# plt.show()
####### plot
# r = 1000.0
# b = 20
# lamList = [950, 955, 960, 965, 970, 975, 980, 985, 990, 995]
# mu = 1000.0
# bList = []
# sList = []
# for lam in lamList:
# lam *= 1.0
# tb, ts, t = totalDelay(r, b, lam, mu, 1)
# bList.append(tb)
# sList.append(ts)
#
#
# plt.plot(lamList, bList)
# plt.plot(lamList, sList)
# legendList = ["token time", "server time"]
# plt.legend(legendList,loc="upper left")
# plt.ylim(-0.01, 0.3)
# plt.xlim(945, 1000)
# plt.show()
##### draw
bList = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
rList = [1050.0, 1100.0, 1150.0, 1200.0, 1250.0, 1300.0, 1350.0, 1400.0, 1450.0, 1500.0]
tbList = [[0.0086655012164963772, 0.0069462937026606858, 0.0061119878316403089,
0.004497763222681749, 0.003876938261844539, 0.003390546964274696,
0.0027159118226675167, 0.0024347461060429659, 0.0019866744028636291,
0.0017593567570707523, 0.0013200001731956527, 0.00099414911505809364],
[0.0037627201922375147, 0.0025625427437552606, 0.0017829949842417827,
0.0012298620493933936, 0.00082696692817260838, 0.00058308794201233677,
0.00040019521369635792, 0.00026437935404872629, 0.00018632066836472602,
0.00012402267729263054, 7.0943063294622068e-05, 5.6494559620774469e-05],
[0.0022095713301978964, 0.0013096853587949539, 0.00072713937256503071,
0.00042894764353715976, 0.00024653261679521114, 0.00013736124194561419,
8.0361412315634121e-05, 4.4430293019744773e-05, 2.9406724790343939e-05,
1.1795146423416464e-05, 8.5039484345559113e-06, 5.9312839614103951e-06],
[0.0014611881929298993, 0.00072593918371486915, 0.00035256953645628077,
0.00017954436487361305, 8.4489334837231018e-05, 4.1318479906370685e-05,
2.1974399513942762e-05, 1.0698779145992274e-05, 4.4373112312226795e-06,
3.028488661354572e-06, 1.2635151914837706e-06, 5.3085700028457263e-07],
[0.0010424890384005199, 0.00044756733360717705, 0.00018240195443261018,
7.9531375689721311e-05, 3.580796894964474e-05, 1.3407249793421535e-05,
5.0436495208507264e-06, 2.3280473410579587e-06, 1.0953525661858181e-06,
4.1576557217018718e-07, 7.1502696550169271e-08, 3.0380263923825622e-08],
[0.0007794387364076905, 0.00028877273908869651, 0.00010307204425758113,
3.7338351682952915e-05, 1.4652217675302338e-05, 4.8590325874305792e-06,
2.377105866288889e-06, 7.8939033892313559e-07, 1.2753345779798336e-07,
1.5646413789522741e-07, 5.0947068469440634e-09, 2.7730064349452735e-08],
[0.00060260250620125587, 0.00019080787103471121, 5.7977087554087513e-05,
1.8072280771463227e-05, 6.3648178513343291e-06, 1.9166132329105377e-06,
6.420684786416018e-07, 2.2753073556394841e-07, 5.0865510844266733e-08,
9.5950108645411091e-09, 1.7806682401669604e-09, 4.703149038959964e-10],
[0.00047860166509120731, 0.00013136002069690563, 3.6838882146894813e-05,
9.6232019150645455e-06, 3.0314451320358898e-06, 1.0411160334375608e-06,
1.7512695237192022e-07, 6.5362352172974166e-08, 5.7878174140796546e-09,
3.1298298001729565e-10, 0.0, 0.0],
[0.0003868298345192014, 9.373483983780517e-05, 2.4924532266800483e-05,
5.2633050377738303e-06, 1.3950417193645079e-06, 2.6167633881354963e-07,
8.4777204153101606e-08, 1.3302193317463208e-08, 1.5399734173206525e-08,
0.0, 0.0, 0.0],
[0.00031714521472453683, 6.7876044345209876e-05, 1.5430425620576841e-05,
2.8363864016281357e-06, 7.2926797369432278e-07, 1.1011910837496543e-07,
7.7841931393777485e-09, 1.3981584637986088e-08, 0.0,
7.8820269800417015e-11, 0.0, 0.0]]
tsList = [[0.006679677862655292, 0.0068770308868411735, 0.0074732966659507918,
0.0077348077227535148, 0.0078105416045624043, 0.008147963937665325,
0.0084921141776806743, 0.008752305338601777, 0.0088621115063590317,
0.0090566327780958918, 0.0093905065648900807, 0.0094743977123601664],
[0.0076441088658002893, 0.0081786353435035122, 0.0087498405194113942,
0.0090029671774246641, 0.0092297928778259427, 0.0093696536701099262,
0.0096617572741030684, 0.0096833025293018727, 0.010003607981588163,
0.0098724565038900442, 0.0098445482952155567, 0.0098002479005328443],
[0.0086956475242956251, 0.0090666151686463504, 0.0093720184341949571,
0.0094531775361485718, 0.0098398963059626848, 0.0098212355945071234,
0.010018041874352037, 0.0098921459096796907, 0.0098956955424670603,
0.010029270355956356, 0.010064604268816387, 0.01001023740313349],
[0.0090904504424792146, 0.009225412608549784, 0.0096016456056585951,
0.0099027595356318918, 0.010039369303912821, 0.0097721368289364549,
0.010042447619923751, 0.010045292325722056, 0.010007482265982762,
0.0099870953803561369, 0.010184912443106139, 0.0098858368161917395],
[0.0093137423055012838, 0.0095619384206493182, 0.0096557424523883665,
0.0099299592347444968, 0.010063250392674448, 0.010127057969903762,
0.0098904826150556166, 0.010036861438288495, 0.0099961991171080636,
0.0099088390440836595, 0.0096536991934565494, 0.010030348539790601],
[0.0093682707003560229, 0.010176501383261838, 0.0098165119959769571,
0.0097205414379321099, 0.010006320447941785, 0.0099182604435972422,
0.010001961172821086, 0.0098252164378607853, 0.0099495692669901714,
0.010102707098157179, 0.010090222760704749, 0.0099789223025760522],
[0.0096336326797271388, 0.0097238686533284747, 0.0098371166194679786,
0.0097904040711137234, 0.0099297341641229348, 0.010001390250069974,
0.0099266848307628282, 0.0098179879154293419, 0.0098578389481048211,
0.0098189810593029593, 0.010100181267139989, 0.0099267782464376418],
[0.0095568714523684116, 0.009885090780846607, 0.0097968008289410768,
0.0097222136568735906, 0.0099612086330636024, 0.010063981692023737,
0.010186485114693453, 0.010036024516736682, 0.0099838449228713509,
0.010130933882378523, 0.010193518255552692, 0.0099776912059497298],
[0.0098415813407483066, 0.0097824395111458257, 0.009936011172877738,
0.010052051864369575, 0.010126848886467584, 0.010142662759735766,
0.010290573689306257, 0.0099869683348446474, 0.0098433343622829003,
0.0098570165778807204, 0.010013374979903155, 0.010064330226453103],
[0.0097614194737020623, 0.009815994410360249, 0.0099672335642590013,
0.0099349179582449675, 0.0098621461642761175, 0.010137879445556835,
0.009970959157126022, 0.010194055612801445, 0.0099125417813472286,
0.0098741304370536624, 0.0099527508964485801, 0.009803767794647502]]
X = np.array(rList)
Y = np.array(bList)
Y, X = np.meshgrid(Y,X)
#
# fig = plt.figure()
#
# ax = Axes3D(fig) #<-- Note the difference from your original code...
#
# cset = ax.contour(X, Y, np.array(tbList))
# ax.clabel(cset, fontsize=9, inline=1)
# plt.show()
# #
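# Note (added): the three wireframe figures below plot tbList, tsList and
# their element-wise sum over the same grid built from rList and bList above,
# with the axes labelled token bucket size and token bucket rate.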
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, np.array(tbList))
ax.set_xlabel('token bucket size')
ax.set_ylabel('token bucket rate')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, np.array(tsList))
ax.set_xlabel('token bucket size')
ax.set_ylabel('token bucket rate')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, np.array(tbList)+np.array(tsList))
ax.set_xlabel('token bucket size')
ax.set_ylabel('token bucket rate')
plt.show()
| mit |
3manuek/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
sequana/sequana | sequana/assembly.py | 1 | 4934 | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2018 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
from sequana.lazy import pylab
from sequana.lazy import pandas as pd
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["BUSCO"]
class BUSCO(object):
"""Wrapper of the BUSCO output
"BUSCO provides a quantitative measures for the assessment
of a genome assembly, gene set, transcriptome completeness, based on
evolutionarily-informed expectations of gene content from near-universal
single-copy orthologs selected from OrthoDB v9." -- BUSCO website 2017
This class reads the full report generated by BUSCO and provides some
visualisation of this report. The information is stored in a dataframe
    :attr:`df`. The score can be retrieved with the attribute :attr:`score` in
percentage in the range 0-100.
:reference: http://busco.ezlab.org/
"""
def __init__(self, filename="full_table_testbusco.tsv"):
""".. rubric:: constructor
:filename: a valid BUSCO input file (full table). See example in sequana
code source (testing)
"""
self.df = pd.read_csv(filename, sep="\t", skiprows=4)
def pie_plot(self, filename=None, hold=False):
"""Plot PIE plot of the status (complete / fragment / missed)
.. plot::
:include-source:
from sequana import BUSCO, sequana_data
b = BUSCO(sequana_data("test_busco_full_table.tsv"))
b.pie_plot()
"""
if hold is False:
pylab.clf()
self.df.groupby('Status').count()['# Busco id'].plot(kind="pie")
pylab.ylabel("")
#pylab.title("Distribution Complete/Fragmented/Missing")
#pylab.legend()
if filename:
pylab.savefig(filename)
def scatter_plot(self, filename=None, hold=False):
"""Scatter plot of the score versus length of each ortholog
.. plot::
:include-source:
from sequana import BUSCO, sequana_data
b = BUSCO(sequana_data("test_busco_full_table.tsv"))
b.scatter_plot()
        Missing BUSCOs are not shown since there is no contig information for them.
"""
if hold is False:
pylab.clf()
colors = ["green", "orange", "red", "blue"]
markers = ['o', 's', 'x', 'o']
for i, this in enumerate(["Complete", "Fragmented", "Duplicated"]):
mask = self.df.Status == this
if sum(mask)>0:
self.df[mask].plot(x="Length", y="Score", kind="scatter",
color=colors[i], ax=pylab.gca(),
marker=markers[i], label=this)
pylab.legend()
pylab.grid()
if filename:
pylab.savefig(filename)
def summary(self):
"""Return summary information of the missing, completed, fragemented
orthologs
"""
df = self.df.drop_duplicates(subset=["# Busco id"])
data = {}
data['S'] = sum(df.Status == "Complete")
data['F'] = sum(df.Status == "Fragmented")
data['D'] = sum(df.Status == "Duplicated")
data['C'] = data['S'] + data['D']
data['M'] = sum(df.Status == "Missing")
data['total'] = len(df)
data['C_pc'] = data['C'] *100. / data['total']
data['D_pc'] = data['D'] *100. / data['total']
data['S_pc'] = data['S'] *100. / data['total']
data['M_pc'] = data['M'] *100. / data['total']
data['F_pc'] = data['F'] *100. / data['total']
return data
def get_summary_string(self):
data = self.summary()
C = data['C_pc']
F = data["F_pc"]
D = data["D_pc"]
S = data["S_pc"]
M = data["M_pc"]
N = data["total"]
string = "C:{:.1f}%[S:{:.1f}%,D:{:.1f}%],F:{:.1f}%,M:{:.1f}%,n:{}"
return string.format(C, S, D, F, M, N)
def _get_score(self):
return self.summary()["C_pc"]
score = property(_get_score)
def __str__(self):
data = self.summary()
C = data['C']
F = data["F"]
D = data["D"]
S = data["S"]
M = data["M"]
N = data["total"]
string = """# BUSCO diagnostic
{}
{} Complete BUSCOs (C)
{} Complete and single-copy BUSCOs (S)
{} Complete and duplicated BUSCOs (D)
{} Fragmented BUSCOs (F)
{} Missing BUSCOs (M)
{} Total BUSCO groups searched
"""
return string.format(self.get_summary_string(), C, S, D, F, M, N)
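# --- Added illustration; not part of the original sequana module ---
# Minimal, hedged usage sketch of the BUSCO wrapper above. The file name is
# hypothetical; any BUSCO "full table" TSV with a Status column should behave
# the same way.
def _example_busco_usage(filename="full_table_sample.tsv"):
    """Sketch: load a BUSCO full table and report the completeness score."""
    busco = BUSCO(filename)
    print(busco.get_summary_string())  # e.g. "C:97.5%[S:96.1%,D:1.4%],F:1.0%,M:1.5%,n:4085"
    return busco.score                 # complete (C) percentage in [0, 100]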
| bsd-3-clause |
ybayle/ReproducibleResearchIEEE2017 | src/bayle.py | 1 | 21016 | # -*- coding: utf-8 -*-
#!/usr/bin/python
#
# Author Yann Bayle
# E-mail [email protected]
# License MIT
# Created 01/12/2016
# Updated 01/12/2016
# Version 1.0.0
#
"""
Description of bayle.py
======================
0 Input the local extracted features from YAAFE
13 MFCC per frame
186 musical pieces as train set
1 Computes delta and double delta (39 features per frame)
2 Gather global mean (39 features per musical pieces)
3 train on mfcc & deltas (39 feat/frame) to output global predictions
4 Use global preds to compute song and instru n-grams and histogramm
which add 70 feat/track
lead to a total of 109 feat/track
5 Fit on 109x186
6 predict (or predict_proba) on 41491 track
:Example:
source activate py27
ipython
run bayle.py -d /media/sf_github/yann/train/
..todo::
"""
import multiprocessing
import webbrowser
import utils
import numpy as np
from sklearn.svm import SVC
from sklearn import linear_model
import sys
from functools import partial
import time
from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score
import time
import numpy as np
import matplotlib.pyplot as plt
import math
import re
import os
import sys
import csv
import time
import utils
import argparse
from datetime import date
from collections import Counter
from matplotlib.cm import ScalarMappable
from matplotlib.colors import Normalize
from matplotlib.colorbar import ColorbarBase
import matplotlib.pyplot as plt
import numpy as np
import joblib
from sklearn.ensemble import RandomForestClassifier
import librosa
import os
import sys
import json
import math
import utils
import random
import joblib
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.neural_network import MLPClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import KFold, cross_val_score
from statistics import mean, stdev
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis, LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_recall_curve, precision_score, recall_score, classification_report, f1_score, accuracy_score
from sklearn import linear_model
from sklearn.tree import DecisionTreeClassifier
import classify
# import reproduce
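# --- Added illustration; not part of the original script ---
# Hedged sketch of steps 1-2 from the module docstring: per-frame MFCCs
# (n_frames x 13) are extended with delta and double-delta to 39 features per
# frame, then averaged into a single 39-dimensional track-level vector. The
# array orientation simply mirrors how the YAAFE CSVs are consumed below.
def _example_mfcc_deltas(mfccs):
    """Sketch: (n_frames, 13) MFCC array -> (n_frames, 39) local features
    plus a (39,) global mean."""
    delta = librosa.feature.delta(mfccs)
    delta2 = librosa.feature.delta(mfccs, order=2)
    local_feats = np.concatenate((mfccs, delta, delta2), axis=1)
    global_feats = np.mean(local_feats, axis=0)
    return local_feats, global_feats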
def arr2str(data, separator=","):
return separator.join(str(x) for x in data)
def str2arr(data):
return np.array(data).astype(np.float)
def read_gts(filename, separator="\t"):
track_gts = {}
with open(filename, "r") as filep:
for line in filep:
line = line.split(separator)
track_gts[line[0]] = line[1][:-1]
return track_gts
def match_feat_with_song_gt(dir_feat, dir_gts):
"""Description of match_feat_gt
Use groundtruth created by
http://www.mathieuramona.com/wp/data/jamendo/
    and associate it with the local features.
csv 7041 lines yaafe
lab 326.973 sec ramona
Definition of YAAFE from
http://yaafe.sourceforge.net/features.html
"""
utils.print_success("Matching local feat to song/instru groundtruths")
dir_feat = utils.abs_path_dir(dir_feat)
dir_gts = utils.abs_path_dir(dir_gts)
block_size = 1024.
step_size = 512.
fech = 22050.
frame_size_ms = block_size / fech
filenames = [fn for fn in os.listdir(dir_gts)]
for index, filename in enumerate(filenames):
utils.print_progress_start(str(index) + "/" + str(len(filenames)) + " " + filename)
# gather groundtruths
groundtruths = []
with open(dir_gts + filename, "r") as filep:
for row in filep:
line = row.split(" ")
end = float(line[1])
if "no" in line[2]:
tag = ",i\n"
else:
tag = ",s\n"
groundtruths.append([end, tag])
gt_len = len(groundtruths)
overflow = False
gt_index = 0
cpt = 0
# Write features & groundtruths to file
str_to_write = ""
feat_fn = filename.split(".")[0]
feat_fn += ".wav.mfcc.csv"
with open(dir_feat + feat_fn, "r") as filep:
for index, line in enumerate(filep):
# todo cleanup
if gt_index < gt_len:
if frame_size_ms * index > groundtruths[gt_index][0]:
gt_index += 1
if gt_index < gt_len:
str_to_write += line[:-1] + groundtruths[gt_index][1]
with open(dir_feat + feat_fn, "w") as filep:
filep.write(str_to_write)
utils.print_progress_end()
def match_feat_with_instru_gt(indir, outdir):
"""Description of match_feat_gt
Apply instru groundtruth to CCmixter and MedleyDB
"""
utils.print_success("Matching local features to instrumental groundtruths")
indir = utils.abs_path_dir(indir) + "/"
outdir = utils.abs_path_dir(outdir) + "/"
filenames = [fn for fn in os.listdir(indir)]
for filename in filenames:
outfile = open(outdir + filename, "w")
with open(indir + filename, "r") as filep:
for line in filep:
outfile.write(line[:-1] + " i\n")
outfile.close()
def process_local_feat(indir, file_gts_track, outdir_local, out_feat_global, train):
"""Description of process_local_feat
Add delta and double delta to MFCCs
"""
utils.print_success("Processing local features")
# Preprocess arg
indir = utils.abs_path_dir(indir)
file_gts_track = utils.abs_path_file(file_gts_track)
filelist = os.listdir(indir)
outdir_local = utils.abs_path_dir(outdir_local)
track_gts = {}
with open(file_gts_track, "r") as filep:
for line in filep:
line = line.split(",")
if train:
index = line[0]
else:
index = line[0] + ".wav.mfcc.csv"
track_gts[index] = line[1][:-1]
for index, filename in enumerate(filelist):
utils.print_progress_start(str(index) + "/" + str(len(filelist)) + " " + filename)
if filename in track_gts:
mfccs = []
groundtruths = []
with open(indir + filename, "r") as filep:
next(filep)
next(filep)
next(filep)
next(filep)
next(filep)
for line in filep:
line = line.split(",")
mfccs.append(str2arr(line[:-1]))
if train:
groundtruths.append(line[-1][:-1])
mfccs = np.array(mfccs)
delta_mfcc = librosa.feature.delta(mfccs)
delta2_mfcc = librosa.feature.delta(mfccs, order=2)
# Write local features in outdir_local
with open(outdir_local + filename, "w") as filep:
gt_to_write = ""
if "i" in track_gts[filename]:
gt_to_write = ",i"
elif "s" in track_gts[filename]:
                    # postpone frame groundtruth annotations to another function later in the code
gt_to_write = ""
else:
utils.print_warning("bayle.py line 231 local frame groundtruth undefined")
if train:
for a, b, c, d in zip(mfccs, delta_mfcc, delta2_mfcc, groundtruths):
filep.write(arr2str(a) + "," + arr2str(b) + "," + arr2str(c) + "," + d + "\n")
else:
for a, b, c in zip(mfccs, delta_mfcc, delta2_mfcc):
filep.write(arr2str(a) + "," + arr2str(b) + "," + arr2str(c) + gt_to_write + "\n")
# # Write global features in out_feat_global
# with open(out_feat_global, "a") as filep:
# filep.write(filename + "," +
# arr2str(np.mean(mfccs, axis=0)) + "," +
# arr2str(np.mean(delta_mfcc, axis=0)) + "," +
# arr2str(np.mean(delta2_mfcc, axis=0)) + "," +
# track_gts[filename] + "\n")
utils.print_progress_end()
utils.print_success("Adding local groundtruths to Songs in Jamendo thanks to Ramona annotations")
match_feat_with_song_gt(dir_feat=outdir_local, dir_gts="groundtruths/frame_annot_jamendo_ramona/")
utils.print_success("Done")
def column(matrix, i):
return [row[i] for row in matrix]
def ngram_proba(local_pred, threshold=0.5, above_threshold=True):
"""
n-gram creation
"""
cpt_ngram = 0
nb_ngram = 30
ngrams = [0,] * nb_ngram
for pred in local_pred:
if above_threshold:
condition = pred > threshold
else:
condition = pred <= threshold
if condition:
cpt_ngram += 1
else:
if cpt_ngram < nb_ngram:
ngrams[cpt_ngram] += 1
else:
ngrams[nb_ngram-1] += 1
cpt_ngram = 0
nb_tag_sing = float(sum(ngrams))
if nb_tag_sing > 0.:
ngrams = [float(x) / nb_tag_sing for x in ngrams]
# utils.print_error(ngrams)
return ','.join(str(x) for x in ngrams)
def ngram(preds, tag):
"""Description of ngram
"""
cpt_ngram = 0
nb_ngram = 30
ngrams = [0,] * nb_ngram
for pred in preds:
if tag in pred:
cpt_ngram += 1
else:
if cpt_ngram < nb_ngram:
ngrams[cpt_ngram] += 1
else:
ngrams[nb_ngram-1] += 1
cpt_ngram = 0
nb_tag = float(sum(ngrams))
if nb_tag > 0.:
ngrams = [float(x) / nb_tag for x in ngrams]
return ','.join(str(x) for x in ngrams)
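# --- Added illustration; not part of the original script ---
# Hedged example of the run-length encoding above: for a given tag, every
# frame *without* the tag closes the current run (possibly of length zero) and
# increments that run-length bin; a run still open at the end of the sequence
# is not counted. The 30 bins are then normalised and joined into a
# comma-separated string.
def _example_ngram():
    preds = ["s", "s", "s", "i", "s", "i", "i"]  # hypothetical frame predictions
    song_hist = ngram(preds, "s")    # normalised run-length counts for "s"
    instru_hist = ngram(preds, "i")  # normalised run-length counts for "i"
    return song_hist, instru_hist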
def create_track_feat_testset(folder, infile, outfile, model_file, train=False):
"""Description of create_track_feat_testset
    Read each test file,
    compute deltas on the MFCCs in memory,
    run predict and predict_proba with the frame-level model,
    generate song and instru n-grams and histograms,
    add the mean of the MFCCs + deltas,
    and append the resulting 109-feature vector to feat_track/feat_test.csv.
"""
utils.print_success("Create track feat testset")
folder = utils.abs_path_dir(folder)
infile = utils.abs_path_file(infile)
clf = joblib.load(model_file)
track_gts = read_gts(infile, separator=",")
for index, filename in enumerate(track_gts):
utils.print_progress_start(str(index+1) + "/" + str(len(track_gts)) + " " + filename)
mfccs = []
mfccs_1 = []
extension = ""
if train:
extension = ""
else:
extension += "_audio_full_mono_22k"
extension += ".wav.mfcc.csv"
with open(folder + filename + extension, "r") as filep:
if train:
next(filep)
next(filep)
next(filep)
next(filep)
next(filep)
for line in filep:
if train:
line = line.split(",")
else:
line = line.split(" ")
mfccs_1.append(str2arr(line[:-1]))
# if train:
# mfccs.append(str2arr(line[:-1]))
# else:
# mfccs.append(str2arr(line[0:]))
mfccs = np.array(mfccs_1)
delta_mfcc = librosa.feature.delta(mfccs)
delta2_mfcc = librosa.feature.delta(mfccs, order=2)
tmp = np.append(mfccs, delta_mfcc, axis=1)
features = np.append(tmp, delta2_mfcc, axis=1)
preds_proba = clf.predict_proba(features)
# Histogramm
nb_hist_class = 10
numbers = column(preds_proba, 0)
hist_pred = np.histogram(numbers, nb_hist_class)
hist_pred_norm = hist_pred[0] / float(sum(hist_pred[0]))
ngram_threshold = 0.5
song_ngram_proba = ngram_proba(local_pred=numbers, threshold=ngram_threshold, above_threshold=True)
instru_ngram_proba = ngram_proba(local_pred=numbers, threshold=ngram_threshold, above_threshold=False)
preds = clf.predict(features)
song_ngram = ngram(preds, "s")
instru_ngram = ngram(preds, "i")
with open(outfile, "a") as filep:
filep.write(filename[:12] + "," +
arr2str(np.mean(mfccs, axis=0)) + "," +
arr2str(np.mean(delta_mfcc, axis=0)) + "," +
arr2str(np.mean(delta2_mfcc, axis=0)) + "," +
arr2str(hist_pred_norm) + "," +
song_ngram_proba + "," +
instru_ngram_proba + "," +
song_ngram + "," +
instru_ngram + "," +
track_gts[filename] + "\n")
utils.print_progress_end()
def figures1bd(indir, file_gts_track):
"""Description of figures1bd
infile is formated like:
/media/sf_github/yann/train/01 - 01 Les Jardins Japonais.wav.mfcc.csv
feat1 feat2 ... featn tag1
feat1 feat2 ... featn tag2
...
feat1 feat2 ... featn tag2
0 Input the local extracted features from YAAFE
13 MFCC per frame
186 musical pieces as train set
1 Computes delta and double delta (39 features per frame)
2 Gather global mean (39 features per musical pieces)
3 train on mfcc & deltas (39 feat/frame) to output global predictions
4 Use global preds to compute song and instru n-grams and histogramm
which add 70 feat/track
lead to a total of 109 feat/track
5 Fit on 109x186
6 predict (or predict_proba) on 41491 track
"""
# Preprocess arg
indir = utils.abs_path_dir(indir)
file_gts_track = utils.abs_path_file(file_gts_track)
feat_frame_train = "feat_frame_train/"
utils.create_dir(feat_frame_train)
feat_frame_test = "feat_frame_test/"
utils.create_dir(feat_frame_test)
outdir_global = "feat_track/"
utils.create_dir(outdir_global)
feat_train = outdir_global + "train.csv"
feat_test = outdir_global + "test.csv"
models_dir = "models/"
utils.create_dir(models_dir)
loc_feat_testset_dirpath = "/media/sf_DATA/Datasets/Simbals/yaafe/results/processed/"
filelist_test = "filelist_test.tsv"
filelist_train = "filelist_train.tsv"
models_global = "models_track/"
utils.create_dir(models_global)
# process_local_feat(indir, file_gts_track, feat_frame_train, feat_train, train=True)
# classify.create_models(outdir=models_dir, train_dir=feat_frame_train, separator=",")
# create_track_feat_testset(indir, filelist_train, feat_train, train=True)
# 15h28m44s to 19h08m28s Done in 13184117ms
# create_track_feat_testset(loc_feat_testset_dirpath, filelist_test, feat_test)
# classify.create_models(outdir=models_global, train_file=feat_train)
# classify.test_models_parallel(
# models_dir=models_global,
# out_dir="results/",
# test_file=feat_test)
# Display results
reproduce.plot_results("results/")
def figure1a(file_gts_track):
"""Description of figure1a
"""
outdir_global = "feat_track/"
utils.create_dir(outdir_global)
feat_train = outdir_global + "train.csv"
# process_local_feat(indir, file_gts_track, feat_frame_train, feat_train, train=True)
classify.cross_validation(feat_train, n_folds=5)
def figure2(indir, file_gts_track):
"""Description of figure2
Method to maintain 100 percent of precision and to maximize recall.
"""
pass
def read_file_bayle(filename):
"""Description of read_file
train/test example line:
filename,feat1,feat2,...,featn,tag
"""
filename = utils.abs_path_file(filename)
filenames = []
groundtruths = []
features = []
with open(filename, "r") as filep:
for row in filep:
line = row.split(",")
filenames.append(line[0])
features.append([float(i) for i in line[1:-1]])
gt = line[-1]
while "\n" in gt or "\r" in gt:
gt = gt [:-1]
groundtruths.append(gt)
return filenames, features, groundtruths
def column(matrix, i):
return [row[i] for row in matrix]
def process_results(train, test):
train_fn, train_features, train_groundtruths = read_file_bayle(train)
test_fn, test_features, test_groundtruths = read_file_bayle(test)
step = 0.1
# for weight in np.arange(0.0, 1.0, step):
# inside_clf = RandomForestClassifier(random_state=2)
inside_clf = DecisionTreeClassifier(random_state=2)
# class_weight={"i":weight, "s":1-weight})
clf = AdaBoostClassifier(
random_state=2,#with 4 98%precision song class
base_estimator=inside_clf)
clf.fit(train_features, train_groundtruths)
predictions = clf.predict(test_features)
print("Accuracy " + str(accuracy_score(test_groundtruths, predictions)))
print("F-Measure " + str(f1_score(test_groundtruths, predictions, average="weighted")))
print("Precision " + str(precision_score(test_groundtruths, predictions, average=None)))
print("Recall " + str(recall_score(test_groundtruths, predictions, average=None)))
print("F-Measure " + str(f1_score(test_groundtruths, predictions, average=None)))
# predictions = [1.0 if i=="s" else 0.0 for i in predictions]
predictions = column(clf.predict_proba(test_features), 0)
outdir = "predictions/"
with open(outdir + "Bayle.csv", "w") as filep:
for name, pred in zip(test_fn, predictions):
filep.write(name + "," + str(1.0 - float(pred)) + "\n")
def new_algo_final(indir, file_gts_track):
utils.print_success("Approx. time ~6 hours.")
# Preprocess arg
indir = utils.abs_path_dir(indir)
file_gts_track = utils.abs_path_file(file_gts_track)
dir_tmp = utils.create_dir(utils.create_dir("src/tmp") + "bayle")
feat_frame_train = utils.create_dir(dir_tmp + "feat_frame_train")
feat_frame_test = utils.create_dir(dir_tmp + "feat_frame_test")
outdir_global = utils.create_dir(dir_tmp + "feat_track")
feat_train = outdir_global + "train.csv"
feat_test = outdir_global + "test.csv"
models_dir = utils.create_dir(dir_tmp + "models")
loc_feat_testset_dirpath = "features/database2/"
filelist_train = "groundtruths/database1.csv"
filelist_test = "groundtruths/database2.csv"
models_global = utils.create_dir(dir_tmp + "models_track")
process_local_feat(indir, file_gts_track, outdir_local=feat_frame_train, out_feat_global=feat_train, train=False)
classify.create_models(outdir=models_dir, train_dir=feat_frame_train, separator=",", classifiers="RandomForest")
"""
Create features at track scale for the train set
Features: MFCC + Delta + Double Delta + ngrams + hist
"""
model_file = "src/tmp/bayle/models/RandomForest/RandomForest.pkl"
model_file = "/media/sf_DATA/ReproducibleResearchIEEE2017/src/tmp/bayle/models/RandomForest/RandomForest.pkl"
create_track_feat_testset(indir, filelist_train, feat_train, model_file, train=True)
# # 15h28m44s to 19h08m28s Done in 13184117ms
create_track_feat_testset(loc_feat_testset_dirpath, filelist_test, feat_test, model_file)
classify.create_models(outdir=models_global, train_file=feat_train, classifiers="RandomForest")
process_results(feat_train, feat_test)
def main():
begin = int(round(time.time() * 1000))
PARSER = argparse.ArgumentParser(description="Bayle et al. (2017) algorithm")
PARSER.add_argument(
"-d",
"--indir",
help="input dir containing all local features extracted by YAAFE",
type=str,
default="/media/sf_github/yann/train/",
metavar="indir")
PARSER.add_argument(
"-i",
"--gts",
help="input file containing all track groundtruths",
type=str,
default="filelist_train.tsv")
indir = "features/database1/"
file_gts_track = "groundtruths/database1.csv"
new_algo_final(indir, file_gts_track)
# figure1a(PARSER.parse_args().gts)
# figures1bd(PARSER.parse_args().indir, PARSER.parse_args().gts)
# figure2(PARSER.parse_args().indir, PARSER.parse_args().gts)
# Local feat processing
# Global feat processing
# bayle_fig3()
utils.print_success("Done in " + str(int(round(time.time() * 1000)) - begin) + "ms")
if __name__ == "__main__":
main()
| mit |
pjgaudre/2016-IPSW-500px | Def500.py | 1 | 3315 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 17 10:26:55 2016
@author: davidc
"""
#!/usr/bin/python
import numpy as np
import pylab #image showing, an apendix to matplotlib
from PIL import Image, ImageChops
import pandas as pd #data package like read csv
import os
#import matplotlib.pyplot as plt #image showing
# import exifread # https://pypi.python.org/pypi/ExifRead
#import pytesseract # https://pypi.python.org/pypi/pytesseract
# https://github.com/tesseract-ocr/tesseract/wiki
'''
These pictures are duplicates
#1343 train/21556793.jpeg
#1594 train/19990265.jpeg
#3410 train/19028923.jpeg
'''
#set working directory
os.chdir('/Users/davidc/Desktop/Research_Tools/2016_IPSW_500px')
def Photo_id_2_index(photo_id,train):
return train['photo_id'][train['photo_id']==photo_id].index[0]
def ImagToJpeg(index,train):
'''
Function ImagToJpeg opens the jpeg:
INPUTS:
index: which picture do you want? 0, 1, 2, ...., 3677
'''
path = 'dataset/'+ train['image_path'][index]
img_jpg = Image.open(open(path,'rb'))
return img_jpg
def JpegToArray(img_jpg):
'''
Function JpegToArray converts the jpeg to an array:
INPUTS:
index: file in a jpg image type
'''
img_arry = np.asarray(img_jpg, dtype='float64') # Converts Jpeg to Array
return img_arry
def ViewArray(img_arry):
'''
Function ViewImg outputs the image:
INPUTS:
img: array returned from JpegToArray
'''
pylab.imshow(img_arry)
def rgb2grey(rbg):
'''
Function rgb2grey converts an array image to greyscale
INPUTS:
index: img_array
'''
return np.dot(rbg[...,:3], [0.299, 0.587, 0.114])
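# Note (added): the weights used above, [0.299, 0.587, 0.114], are the
# standard ITU-R BT.601 luma coefficients, so rgb2grey returns a luminance
# (perceptually weighted greyscale) image.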
def ViewImagIndex(index,train):
'''
    Function ViewImagIndex displays the image at the given index:
INPUTS:
index:
'''
View_img_jpg = ImagToJpeg(index,train)
pylab.imshow(View_img_jpg)
def Is_there_a_border(index,train):
'''
    Function Is_there_a_border heuristically checks whether the image at the given index has a border:
INPUTS:
index:
'''
im = ImagToJpeg(index,train)
bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
diff = ImageChops.difference(im, bg)
# diff = ImageChops.add(diff, diff, 2.0, -50)
diff = ImageChops.add(diff, diff, 2.0, -int(np.percentile(JpegToArray(diff),25)))
bbox = diff.getbbox()
LB = not (bbox[1] == 0)
RB = not (bbox[3] == im.size[1])
TB = not (bbox[0] == 0)
BB = not (bbox[2] == im.size[1])
borders = ( (LB and RB) ) # or (TB and BB) or (LB and TB) or (LB and BB) or (RB and TB) or (RB and BB)
return borders
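# Note (added): Is_there_a_border uses a common PIL heuristic -- build a solid
# background image from the corner pixel colour, take ImageChops.difference,
# shift it so faint differences vanish (via the 25th-percentile offset), and
# inspect getbbox() of what remains; if that bounding box stops short of the
# image edges, a uniform border is assumed. Only the (LB and RB) combination
# is returned as the final flag.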
def Is_there_a_border_vec(index,train):
'''
    Function Is_there_a_border_vec applies Is_there_a_border to every index and collects the results in a vector:
INPUTS:
index:
'''
temp = np.zeros(index.size)
for i in index:
temp[i] = Is_there_a_border(i,train)
return temp
#Is_there_a_border_vec = np.vectorize(Is_there_a_border, excluded=['train'])
def CompBordList(Predicted_Border):
'''
    Function CompBordList returns the photo_ids whose manual label marks a true border (label == 2):
INPUTS:
Predicted_Border: algorithm chosen pics with borders
'''
BenMar3col = pd.read_csv('train_borders3.csv')
BM_border = BenMar3col['photo_id'][BenMar3col['label']==2]
BM_border = BM_border.values
return BM_border
| mit |
TensorVision/MediSeg | AP3/basic_local_classifier.py | 1 | 14922 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A basic classifier which uses only local features."""
import os.path
from PIL import Image
import scipy.misc
import scipy.ndimage
import logging
import sys
import time
import numpy as np
import json
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
from keras.models import Sequential
from keras.layers import Dense, Dropout
import keras.optimizers
import sklearn
from keras.models import model_from_yaml
from keras.preprocessing.image import img_to_array
from skimage.segmentation import quickshift, slic
from tensorvision.utils import load_segmentation_mask
import sys
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from utils import get_file_list
import analyze
from seg_utils import get_image, get_class_weight
def get_features(x, y, image, model_nr=2):
"""Get features at position (x, y) from image."""
height, width, _ = image.shape
p = get_pos_colors(image, x, y)
if model_nr in [1, "1.1"]:
return p
elif model_nr in [2, 3]:
return (p[0], p[1], p[2], x, y)
elif model_nr in [4]:
left = get_pos_colors(image, x - 1, y)
return (p[0], p[1], p[2], left[0], left[1], left[2], x, y)
elif model_nr in [5]:
left = get_pos_colors(image, x - 1, y)
right = get_pos_colors(image, x + 1, y)
top = get_pos_colors(image, x, y + 1)
bottom = get_pos_colors(image, x, y - 1)
return (p[0], p[1], p[2],
left[0], left[1], left[2],
right[0], right[1], right[2],
top[0], top[1], top[2],
bottom[0], bottom[1], bottom[2])
else:
print("model_nr '%s' unknown" % str(model_nr))
sys.exit(-1)
def get_pos_colors(image, x, y):
"""Get the color at a position or 0-vector, if the position is invalid."""
if x > 0 and y > 0 and len(image) > y and len(image[0]) > x:
return (image[y][x][0], image[y][x][1], image[y][x][2])
else:
return (0, 0, 0)
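# --- Added illustration; not part of the original module ---
# Hedged sketch of the feature layouts produced by get_features, using a tiny
# synthetic RGB image; only the vector lengths are demonstrated here.
def _example_feature_lengths():
    image = np.zeros((4, 4, 3))  # height x width x RGB
    assert len(get_features(1, 1, image, model_nr=2)) == 5    # r, g, b, x, y
    assert len(get_features(1, 1, image, model_nr=4)) == 8    # + left-neighbour colour
    assert len(get_features(1, 1, image, model_nr=5)) == 15   # centre + 4-neighbour colours
    return True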
def inputs(hypes, _, phase, data_dir):
"""
Get data.
Parameters
----------
hypes : dict
_ : ignore this
phase : {'train', 'val'}
data_dir : str
Returns
-------
tuple
(xs, ys), where xs and ys are lists of the same length.
xs are paths to the input images and ys are paths to the expected
output
"""
x_files, y_files = get_file_list(hypes, 'train')
x_files, y_files = sklearn.utils.shuffle(x_files,
y_files,
random_state=0)
xs, ys = [], []
for x, y in zip(x_files, y_files):
logging.info("Read '%s' for data...", x)
image = get_image(x, 'RGB')
label = load_segmentation_mask(hypes, y)
im = Image.open(x, 'r')
width, height = im.size
for x in range(width):
for y in range(height):
image_val = get_features(x, y, image, hypes['model_nr'])
label_val = label[y][x]
xs.append(image_val)
ys.append(label_val)
return xs, np.array(ys, dtype=int)
def shuffle_in_unison_inplace(a, b):
"""Shuffle both, a and b, the same way."""
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
def generate_training_data(hypes, x_files, y_files):
"""
Generate training data.
Parameters
----------
hypes : dict
Hyperparameters
x_files : list
Paths to raw data files
y_files : list
Paths to segmentation masks
Yields
------
tuple
(xs, ys) - training batch of feature list xs and label list ys
"""
x_files, y_files = sklearn.utils.shuffle(x_files,
y_files,
random_state=0)
i = 0
xs, ys = get_traindata_single_file(hypes, x_files[i], y_files[i])
i = (i + 1) % len(x_files)
while True:
while len(xs) < hypes['solver']['batch_size']:
xs_tmp, ys_tmp = get_traindata_single_file(hypes,
x_files[i],
y_files[i])
i = (i + 1) % len(x_files)
xs = np.concatenate((xs, xs_tmp), axis=0)
ys = np.concatenate((ys, ys_tmp), axis=0)
if hypes['training']['make_equal']:
xs, ys = reduce_data_equal(xs, ys)
# xs, ys = shuffle_in_unison_inplace(xs, ys)
# print("sum(ys)=%i / %i" % (np.sum(ys), len(ys) - np.sum(ys)))
# print("sum(ys[s])=%i" % np.sum(ys[:hypes['solver']['batch_size']]))
yield (xs[:hypes['solver']['batch_size']],
ys[:hypes['solver']['batch_size']])
xs = xs[hypes['solver']['batch_size']:]
ys = ys[hypes['solver']['batch_size']:]
def get_traindata_single_file(hypes, x, y):
"""Get trainingdata for a single file x with segmentation file y."""
xs, ys = [], []
logging.info("Read '%s' for data...", x)
image = get_image(x, 'RGB')
label = load_segmentation_mask(hypes, y)
im = Image.open(x, 'r')
width, height = im.size
for x in range(width):
for y in range(height):
image_val = get_features(x, y, image, hypes['model_nr'])
label_val = label[y][x]
xs.append(image_val)
ys.append(label_val)
return np.array(xs), np.array(ys, dtype=int)
def get_segmentation(hypes, image_path, model):
"""
Get a segmentation.
    Parameters
    ----------
hypes : dict
Hyperparameters (model specific information)
image_path : str
Path to a file which gets segmented.
model : object
Returns
-------
Numpy array of the same width and height as input.
"""
image = get_image(image_path, 'RGB')
# Preprocess
# import skimage.exposure
# image = skimage.exposure.equalize_hist(image)
# image = Image.fromarray(image, 'RGB')
# converter = PIL.ImageEnhance.Color(image)
# image = converter.enhance(2)
# image = img_to_array(image)
# scipy.misc.imshow(image)
im = Image.open(image_path, 'r')
width, height = im.size
segmentation = np.zeros((height, width), dtype=int)
x_test = []
for x in range(width):
for y in range(height):
x_test.append(get_features(x, y, image, hypes['model_nr']))
classes = model.predict_classes(np.array(x_test, dtype=int),
batch_size=1024)
i = 0
for x in range(width):
for y in range(height):
segmentation[y][x] = classes[i]
i += 1
    if hypes['model_nr'] in [3, "1.1"]:
segmentation = morphological_operations(segmentation)
if hypes['segmenter']['invert']:
# Set all labels which are 1 to 0 and vice versa.
segmentation = np.invert(segmentation.astype(bool)).astype(int)
# segmentation = superpixel_majority_vote(image, segmentation)
return segmentation
def superpixel_majority_vote(image, segmentation):
"""Mark superpixels by majority vote."""
image = image.astype(float)
segments = quickshift(image, ratio=0.5, max_dist=10, sigma=1.0)
# segments = slic(image, n_segments=50, compactness=20)
    # watershed -
    # http://scikit-image.org/docs/dev/auto_examples/segmentation/plot_marked_watershed.html
# http://scikit-image.org/docs/dev/auto_examples/
height, width = segments.shape
segment_count = {}
for x in range(width):
for y in range(height):
s = segments[y][x]
if s not in segment_count:
segment_count[s] = {0: 0, 1: 0} # binary
segment_count[s][segmentation[y][x]] += 1
for x in range(width):
for y in range(height):
s = segments[y][x]
class_ = int(segment_count[s][1] > segment_count[s][0])
segmentation[y][x] = class_
return segmentation
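# Note (added): the majority vote above assumes binary labels {0, 1}; each
# quickshift superpixel is relabelled with the class that covers more of its
# pixels, which smooths away isolated per-pixel decisions.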
def morphological_operations(segmentation):
"""Apply morphological operations to improve the segmentation."""
size = 3
segmentation = scipy.ndimage.morphology.binary_erosion(segmentation,
iterations=size)
segmentation = scipy.ndimage.morphology.binary_dilation(segmentation,
iterations=size)
return segmentation
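# Note (added): erosion followed by dilation with the same number of
# iterations is a morphological opening, so small isolated foreground blobs
# are removed while larger regions roughly keep their shape.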
def main(hypes_file, data_dir, override):
"""Orchestrate."""
with open(hypes_file, 'r') as f:
hypes = json.load(f)
if 'training' not in hypes:
hypes['training'] = {}
if 'make_equal' not in hypes['training']:
hypes['training']['make_equal'] = False
base = os.path.dirname(hypes_file)
model_file_path = os.path.join(base, '%s.yaml' % hypes['model']['name'])
model_file_path = os.path.abspath(model_file_path)
weights_file_path = os.path.join(base, '%s.hdf5' % hypes['model']['name'])
weights_file_path = os.path.abspath(weights_file_path)
if not os.path.isfile(model_file_path) or override:
if not os.path.isfile(model_file_path):
logging.info("Did not find '%s'. Start training...",
model_file_path)
else:
logging.info("Override '%s'. Start training...",
model_file_path)
# Get data
# x_files, y_files = inputs(hypes, None, 'train', data_dir)
x_files, y_files = get_file_list(hypes, 'train')
x_files, y_files = sklearn.utils.shuffle(x_files,
y_files,
random_state=0)
x_train, y_train = get_traindata_single_file(hypes,
x_files[0],
y_files[0])
nb_features = x_train[0].shape[0]
logging.info("Input gets %i features", nb_features)
# Make model
model = Sequential()
model.add(Dense(64,
input_dim=nb_features,
init='uniform',
activation='sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adagrad', # rmsprop
metrics=['accuracy'])
generator = generate_training_data(hypes, x_files, y_files)
t0 = time.time()
sep = hypes['solver']['samples_per_epoch']
if True:
class_weight = get_class_weight(hypes)
logging.info("class_weights = %s", class_weight)
model.fit_generator(generator,
samples_per_epoch=sep,
nb_epoch=hypes['solver']['epochs'],
verbose=1,
validation_data=(x_train, y_train),
class_weight=class_weight)
else:
logging.info("Fit with .fit")
x_train, y_train = inputs(hypes, None, 'train', data_dir)
model.fit(x_train, y_train, batch_size=128, nb_epoch=1)
t1 = time.time()
print("Training Time: %0.4f" % (t1 - t0))
# save as YAML
yaml_string = model.to_yaml()
with open(model_file_path, 'w') as f:
f.write(yaml_string)
model.save_weights(weights_file_path)
# Evaluate
data = get_file_list(hypes, 'test')
logging.info("Start segmentation")
analyze.evaluate(hypes,
data,
data_dir,
model,
elements=[0, 1],
get_segmentation=get_segmentation)
else:
logging.info("## Found '%s'.", model_file_path)
with open(model_file_path) as f:
yaml_string = f.read()
model = model_from_yaml(yaml_string)
model.load_weights(weights_file_path)
model.compile(optimizer='adagrad', loss='binary_crossentropy')
data = get_file_list(hypes, 'test')
analyze.evaluate(hypes,
data,
data_dir,
model,
elements=[0, 1],
get_segmentation=get_segmentation)
def reduce_data_equal(x_train, y_train, max_per_class=None):
"""
Reduce the amount of data to get the same number per class.
This script assumes that y_train is a list of binary labels {0, 1}.
"""
n = min(sum(y_train), abs(len(y_train) - sum(y_train)))
if max_per_class is not None:
n = min(n, max_per_class)
true_count, false_count = 0, 0
x_train_n, y_train_n = [], []
x_train = list(x_train)
y_train = list(y_train)
for x, y in zip(x_train, y_train):
if y == 1 and true_count < n:
x_train_n.append(x)
y_train_n.append(y)
true_count += 1
elif y == 0 and false_count < n:
x_train_n.append(x)
y_train_n.append(y)
false_count += 1
x_train = np.array(x_train_n)
y_train = np.array(y_train_n)
return x_train, y_train
def is_valid_file(parser, arg):
"""
Check if arg is a valid file that already exists on the file system.
Parameters
----------
parser : argparse object
arg : str
Returns
-------
arg
"""
arg = os.path.abspath(arg)
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
def get_parser():
"""Get parser object for basic local classifier."""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--out",
dest="data",
help=("output directory"),
required=True)
parser.add_argument("--hypes",
dest="hypes_file",
help=("Configuration file in JSON format"),
type=lambda x: is_valid_file(parser, x),
metavar="FILE",
required=True)
parser.add_argument("--override",
action="store_true", dest="override", default=False,
help="override old model, if it exists")
return parser
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.hypes_file, args.data, args.override)
| mit |
alexpearce/thesis | scripts/background_categories.py | 1 | 3206 | from __future__ import absolute_import, division, print_function
import os
import matplotlib.pyplot as plt
import ROOT
import root_pandas
from histograms import histogram
from root_converters import roocurve, tgraphasymerrors
from plotting_utilities import (
COLOURS as colours,
set_axis_labels
)
PREFIX = 'root://eoslhcb.cern.ch//eos/lhcb/user/a/apearce/CharmProduction/2015_MagDown_MC/{0}' # noqa
FNAME = 'DVntuple.root'
DATA_PATHS = [
os.path.join(PREFIX, str(idx), FNAME)
for idx in range(1, 3)
]
EVT_TYPES = {
'D0ToKpi': 27163003,
'DpToKpipi': 21263010
}
def background_categories(mode):
"""Plot BKGCAT values."""
tree = 'Tuple{0}/DecayTree'.format(mode)
parent = mode.split('To')[0]
columns = [
'{0}_M'.format(parent),
'{0}_BKGCAT'.format(parent)
]
paths = [p.format(EVT_TYPES[mode]) for p in DATA_PATHS]
df = root_pandas.read_root(paths, key=tree, columns=columns)
df.columns = ['M', 'BKGCAT']
if mode == 'D0ToKpi':
mrange = (1800, 1930)
elif mode == 'DpToKpipi':
mrange = (1805, 1935)
nbins = mrange[1] - mrange[0]
signal = df.M[(df.BKGCAT == 0) | (df.BKGCAT == 10)]
ghost = df.M[(df.BKGCAT == 60)]
other = df.M[~((df.BKGCAT == 0) | (df.BKGCAT == 10) | (df.BKGCAT == 60))]
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
histogram([signal, ghost, other], range=mrange, bins=nbins,
label=['Signal', 'Ghost background', 'Other background'], ax=ax)
# Don't have the y-axis go to zero, and add some padding at the top
ax.set_ylim(bottom=0.1, top=2*ax.get_ylim()[1])
ax.set_yscale('log')
set_axis_labels(ax, mode)
ax.legend(loc='best')
fig.savefig('output/{0}_BKGCAT.pdf'.format(mode))
def fits(mode):
f = ROOT.TFile('~/Physics/CharmProduction/analysis/{0}_2015_MagDown_truth_matching_fit.root'.format(mode)) # noqa
w = f.Get('workspace_{0}'.format(mode))
parent = mode.split('To')[0]
x = w.var('{0}_M'.format(parent))
pdf_tot = w.pdf('pdf_m_tot')
pdf_bkg = w.pdf('pdf_m_tot')
data = w.data('data_binned')
frame = x.frame()
data.plotOn(frame)
pdf_bkg.plotOn(frame)
pdf_tot.plotOn(frame, ROOT.RooFit.Components('*bkg*'))
plotobjs = [frame.getObject(i) for i in range(int(frame.numItems()))]
tgraph, tcurve_tot, tcurve_bkg = plotobjs
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
roocurve(ax, tcurve_bkg, color=colours.red, linestyle=':',
label='Background')
roocurve(ax, tcurve_tot, color=colours.blue,
label='Total fit')
tgraphasymerrors(ax, tgraph, color=colours.black, label='MC data')
ax.set_xlim((frame.GetXaxis().GetXmin(), frame.GetXaxis().GetXmax()))
ax.set_ylim(top=1.2*ax.get_ylim()[1])
# Swap the legend entry order so the data is first
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], loc='best')
set_axis_labels(ax, mode)
fig.savefig('output/{0}_BKGCAT_fit.pdf'.format(mode))
if __name__ == '__main__':
# background_categories('D0ToKpi')
# background_categories('DpToKpipi')
fits('D0ToKpi')
fits('DpToKpipi')
| mit |
mtmarsh2/vislab | vislab/tests/vw3.py | 4 | 6227 | import logging
import unittest
import pandas as pd
import numpy as np
import gzip
import os
import test_context
import vislab.predict
import vislab.vw3
class TestVW(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.temp_dirname = vislab.util.cleardirs(
test_context.temp_dirname + '/test_vw')
def test_write_data_in_vw_format_float(self):
feat_df = pd.DataFrame(
data=[
np.array([3.24, 5.666, 1., 0.0000001, 0.]),
np.array([1.00000003, 5, 2, 0.001, -0.000001]),
],
index=['loller', 'taco']
)
feat_name = 'best'
assert(len(feat_df.columns) > 1)
expected = """\
idloller |best 0:3.24 1:5.666 2:1.0
idtaco |best 0:1.0 1:5.0 2:2.0 3:0.001
"""
output_filename = self.temp_dirname + \
'/test_write_data_in_vw_format.txt'
try:
os.remove(output_filename)
except:
pass
vislab.vw3.write_data_in_vw_format(feat_df, feat_name, output_filename)
with open(output_filename) as f:
actual = f.read()
assert(expected == actual)
# Try writing to gzipped file
output_filename = self.temp_dirname + \
'/test_write_data_in_vw_format.txt.gz'
try:
os.remove(output_filename)
except:
pass
vislab.vw3.write_data_in_vw_format(feat_df, feat_name, output_filename)
with gzip.open(output_filename) as f:
actual = f.read()
assert(expected == actual)
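    # Note (added): the expected strings above follow vislab's VW-style text
    # layout -- an example tag built from the DataFrame index (prefixed with
    # "id"), a namespace ("best") after the bar, then sparse index:value
    # features, with zero-valued (and, in these fixtures, vanishingly small)
    # entries omitted.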
def test_write_data_in_vw_format_single_column(self):
feat_df = pd.DataFrame(
data=[
(np.array([2.0003, 2]),),
(np.array([True, False, True, False, False, True]),)
],
index=['id', 'badman']
)
feat_name = 'best'
assert(len(feat_df.columns) == 1)
expected = """\
idid |best 0:2.0003 1:2.0
idbadman |best 0 2 5
"""
output_filename = self.temp_dirname + \
'/test_write_data_in_vw_format_single_column.txt'
try:
os.remove(output_filename)
except:
pass
vislab.vw3.write_data_in_vw_format(feat_df, feat_name, output_filename)
with open(output_filename) as f:
actual = f.read()
assert(expected == actual)
def test__cache_data(self):
# These test file were created from the 'classifier tests' notebook.
feat_filenames = [
test_context.support_dirname + '/simple/first.txt',
test_context.support_dirname + '/simple/second.txt.gz'
]
label_df_filename = test_context.support_dirname + \
'/simple/label_df.h5'
output_dirname = vislab.util.makedirs(
self.temp_dirname + '/cache_data')
cache_cmd, preview_cmd = vislab.vw3._cache_cmd(
label_df_filename, feat_filenames, output_dirname,
2, bit_precision=18, verbose=False, force=False)
vislab.util.run_through_bash_script(
[cache_cmd, preview_cmd], None, verbose=False)
assert(os.path.exists(output_dirname + '/cache.vw'))
expected = """\
-1 1.000000 0|first 0:0.907699 1:0.910662 |second 0:1.057998
-1 1.000000 1|first 0:-0.375222 1:2.900907 |second 0:0.831044
-1 1.000000 2|first 0:-0.276823 1:1.717314 |second 0:-0.345345
-1 1.000000 3|first 0:0.596906 1:1.522828 |second 0:-0.766781
-1 1.000000 4|first 0:0.540094 1:0.094393 |second 0:-0.919987
1 1.000000 5|first 0:-0.972403 1:2.213648 |second 0:-0.0831
-1 1.000000 6|first 0:0.098378 1:0.200471 |second 0:-0.9833
1 1.000000 7|first 0:-0.755463 1:2.802532 |second 0:-0.642245
1 1.000000 8|first 0:-0.326318 1:0.74197 |second 0:1.21393
1 1.000000 9|first 0:-2.115056 1:0.353851 |second 0:1.62912
"""
with open(output_dirname + '/cache_preview.txt') as f:
actual = f.read()
assert(expected == actual)
def test__get_feat_filenames(self):
feat_names = ['first', 'second']
feat_dirname = test_context.support_dirname + '/simple'
vislab.vw3._get_feat_filenames(feat_names, feat_dirname)
def test_vw_fit_simple(self):
label_df_filename = test_context.support_dirname + \
'/simple/label_df.h5'
label_df = pd.read_hdf(label_df_filename, 'df')
dataset = vislab.predict.get_binary_or_regression_dataset(
label_df, 'simple', 'label')
feat_dirname = test_context.support_dirname + '/simple'
vw = vislab.vw3.VW(self.temp_dirname + '/vw_simple')
feat_names = ['first']
pred_df, test_score, val_score, train_score = vw.fit_and_predict(
dataset, feat_names, feat_dirname)
print(feat_names, test_score, val_score, train_score)
#assert(test_score > 0.7 and test_score < 0.8)
feat_names = ['second']
pred_df, test_score, val_score, train_score = vw.fit_and_predict(
dataset, feat_names, feat_dirname)
print(feat_names, test_score, val_score, train_score)
#assert(test_score > 0.9)
feat_names = ['first', 'second']
pred_df, test_score, val_score, train_score = vw.fit_and_predict(
dataset, feat_names, feat_dirname)
print(feat_names, test_score, val_score, train_score)
#assert(test_score > 0.9)
def test_vw_fit_iris(self):
label_df_filename = test_context.support_dirname + \
'/iris/label_df.h5'
label_df = pd.read_hdf(label_df_filename, 'df')
dataset = vislab.predict.get_multiclass_dataset(
label_df, 'iris', 'labels', ['label_0', 'label_1', 'label_2'])
feat_dirname = test_context.support_dirname + '/iris'
vw = vislab.vw3.VW(self.temp_dirname + '/vw_iris', num_passes=[10, 50, 100])
feat_names = ['all']
pred_df, test_score, val_score, train_score = vw.fit_and_predict(
dataset, feat_names, feat_dirname)
print(feat_names, test_score, val_score, train_score)
assert(test_score > 0.8)
# TODO: really want > .9 accuracy!
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| bsd-2-clause |
farhaanbukhsh/sympy | sympy/physics/quantum/circuitplot.py | 58 | 12941 | """Matplotlib based plotting of quantum circuits.
Todo:
* Optimize printing of large circuits.
* Get this to work with single gates.
* Do a better job checking the form of circuits to make sure it is a Mul of
Gates.
* Get multi-target gates plotting.
* Get initial and final states to plot.
* Get measurements to plot. Might need to rethink measurement as a gate
issue.
* Get scale and figsize to be handled in a better way.
* Write some tests/examples!
"""
from __future__ import print_function, division
from sympy import Mul
from sympy.core.compatibility import u, range
from sympy.external import import_module
from sympy.physics.quantum.gate import Gate, OneQubitGate, CGate, CGateS
from sympy.core.core import BasicMeta
from sympy.core.assumptions import ManagedProperties
__all__ = [
'CircuitPlot',
'circuit_plot',
'labeller',
'Mz',
'Mx',
'CreateOneQubitGate',
'CreateCGate',
]
np = import_module('numpy')
matplotlib = import_module(
'matplotlib', __import__kwargs={'fromlist': ['pyplot']},
catch=(RuntimeError,)) # This is raised in environments that have no display.
if not np or not matplotlib:
class CircuitPlot(object):
def __init__(*args, **kwargs):
raise ImportError('numpy or matplotlib not available.')
def circuit_plot(*args, **kwargs):
raise ImportError('numpy or matplotlib not available.')
else:
pyplot = matplotlib.pyplot
Line2D = matplotlib.lines.Line2D
Circle = matplotlib.patches.Circle
#from matplotlib import rc
#rc('text',usetex=True)
class CircuitPlot(object):
"""A class for managing a circuit plot."""
scale = 1.0
fontsize = 20.0
linewidth = 1.0
control_radius = 0.05
not_radius = 0.15
swap_delta = 0.05
labels = []
inits = {}
label_buffer = 0.5
def __init__(self, c, nqubits, **kwargs):
self.circuit = c
self.ngates = len(self.circuit.args)
self.nqubits = nqubits
self.update(kwargs)
self._create_grid()
self._create_figure()
self._plot_wires()
self._plot_gates()
self._finish()
def update(self, kwargs):
"""Load the kwargs into the instance dict."""
self.__dict__.update(kwargs)
def _create_grid(self):
"""Create the grid of wires."""
scale = self.scale
wire_grid = np.arange(0.0, self.nqubits*scale, scale, dtype=float)
gate_grid = np.arange(0.0, self.ngates*scale, scale, dtype=float)
self._wire_grid = wire_grid
self._gate_grid = gate_grid
def _create_figure(self):
"""Create the main matplotlib figure."""
self._figure = pyplot.figure(
figsize=(self.ngates*self.scale, self.nqubits*self.scale),
facecolor='w',
edgecolor='w'
)
ax = self._figure.add_subplot(
1, 1, 1,
frameon=True
)
ax.set_axis_off()
offset = 0.5*self.scale
ax.set_xlim(self._gate_grid[0] - offset, self._gate_grid[-1] + offset)
ax.set_ylim(self._wire_grid[0] - offset, self._wire_grid[-1] + offset)
ax.set_aspect('equal')
self._axes = ax
def _plot_wires(self):
"""Plot the wires of the circuit diagram."""
xstart = self._gate_grid[0]
xstop = self._gate_grid[-1]
xdata = (xstart - self.scale, xstop + self.scale)
for i in range(self.nqubits):
ydata = (self._wire_grid[i], self._wire_grid[i])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
if self.labels:
init_label_buffer = 0
if self.inits.get(self.labels[i]): init_label_buffer = 0.25
self._axes.text(
xdata[0]-self.label_buffer-init_label_buffer,ydata[0],
render_label(self.labels[i],self.inits),
size=self.fontsize,
color='k',ha='center',va='center')
self._plot_measured_wires()
def _plot_measured_wires(self):
ismeasured = self._measurements()
xstop = self._gate_grid[-1]
dy = 0.04 # amount to shift wires when doubled
# Plot doubled wires after they are measured
for im in ismeasured:
xdata = (self._gate_grid[ismeasured[im]],xstop+self.scale)
ydata = (self._wire_grid[im]+dy,self._wire_grid[im]+dy)
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
# Also double any controlled lines off these wires
for i,g in enumerate(self._gates()):
if isinstance(g, CGate) or isinstance(g, CGateS):
wires = g.controls + g.targets
for wire in wires:
if wire in ismeasured and \
self._gate_grid[i] > self._gate_grid[ismeasured[wire]]:
ydata = min(wires), max(wires)
xdata = self._gate_grid[i]-dy, self._gate_grid[i]-dy
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def _gates(self):
"""Create a list of all gates in the circuit plot."""
gates = []
if isinstance(self.circuit, Mul):
for g in reversed(self.circuit.args):
if isinstance(g, Gate):
gates.append(g)
elif isinstance(self.circuit, Gate):
gates.append(self.circuit)
return gates
def _plot_gates(self):
"""Iterate through the gates and plot each of them."""
for i, gate in enumerate(self._gates()):
gate.plot_gate(self, i)
def _measurements(self):
"""Return a dict {i:j} where i is the index of the wire that has
been measured, and j is the gate where the wire is measured.
"""
ismeasured = {}
for i,g in enumerate(self._gates()):
if getattr(g,'measurement',False):
for target in g.targets:
if target in ismeasured:
if ismeasured[target] > i:
ismeasured[target] = i
else:
ismeasured[target] = i
return ismeasured
def _finish(self):
# Disable clipping to make panning work well for large circuits.
for o in self._figure.findobj():
o.set_clip_on(False)
def one_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a single qubit gate."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def two_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a two qubit gate. Doesn't work yet.
"""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]+0.5
print(self._gate_grid)
print(self._wire_grid)
obj = self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def control_line(self, gate_idx, min_wire, max_wire):
"""Draw a vertical control line."""
xdata = (self._gate_grid[gate_idx], self._gate_grid[gate_idx])
ydata = (self._wire_grid[min_wire], self._wire_grid[max_wire])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def control_point(self, gate_idx, wire_idx):
"""Draw a control point."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.control_radius
c = Circle(
(x, y),
radius*self.scale,
ec='k',
fc='k',
fill=True,
lw=self.linewidth
)
self._axes.add_patch(c)
def not_point(self, gate_idx, wire_idx):
"""Draw a NOT gates as the circle with plus in the middle."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.not_radius
c = Circle(
(x, y),
radius,
ec='k',
fc='w',
fill=False,
lw=self.linewidth
)
self._axes.add_patch(c)
l = Line2D(
(x, x), (y - radius, y + radius),
color='k',
lw=self.linewidth
)
self._axes.add_line(l)
def swap_point(self, gate_idx, wire_idx):
"""Draw a swap point as a cross."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
d = self.swap_delta
l1 = Line2D(
(x - d, x + d),
(y - d, y + d),
color='k',
lw=self.linewidth
)
l2 = Line2D(
(x - d, x + d),
(y + d, y - d),
color='k',
lw=self.linewidth
)
self._axes.add_line(l1)
self._axes.add_line(l2)
def circuit_plot(c, nqubits, **kwargs):
"""Draw the circuit diagram for the circuit with nqubits.
Parameters
==========
c : circuit
The circuit to plot. Should be a product of Gate instances.
nqubits : int
The number of qubits to include in the circuit. Must be at least
            as big as the largest ``min_qubits`` of the gates.
"""
return CircuitPlot(c, nqubits, **kwargs)
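    # Usage sketch (not part of this module; assumes numpy/matplotlib are
    # importable and uses gates from sympy.physics.quantum.gate):
    #
    #     from sympy.physics.quantum.gate import CNOT, H
    #     circuit_plot(CNOT(1, 0)*H(1), nqubits=2, labels=labeller(2))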
def render_label(label, inits={}):
"""Slightly more flexible way to render labels.
>>> from sympy.physics.quantum.circuitplot import render_label
>>> render_label('q0')
'$|q0\\\\rangle$'
>>> render_label('q0', {'q0':'0'})
'$|q0\\\\rangle=|0\\\\rangle$'
"""
init = inits.get(label)
if init:
return r'$|%s\rangle=|%s\rangle$' % (label, init)
return r'$|%s\rangle$' % label
def labeller(n, symbol='q'):
"""Autogenerate labels for wires of quantum circuits.
Parameters
==========
n : int
number of qubits in the circuit
symbol : string
A character string to precede all gate labels. E.g. 'q_0', 'q_1', etc.
>>> from sympy.physics.quantum.circuitplot import labeller
>>> labeller(2)
['q_1', 'q_0']
>>> labeller(3,'j')
['j_2', 'j_1', 'j_0']
"""
return ['%s_%d' % (symbol,n-i-1) for i in range(n)]
class Mz(OneQubitGate):
"""Mock-up of a z measurement gate.
This is in circuitplot rather than gate.py because it's not a real
gate, it just draws one.
"""
measurement = True
gate_name='Mz'
gate_name_latex=u('M_z')
class Mx(OneQubitGate):
"""Mock-up of an x measurement gate.
This is in circuitplot rather than gate.py because it's not a real
gate, it just draws one.
"""
measurement = True
gate_name='Mx'
gate_name_latex=u('M_x')
class CreateOneQubitGate(ManagedProperties):
def __new__(mcl, name, latexname=None):
if not latexname:
latexname = name
return BasicMeta.__new__(mcl, name + "Gate", (OneQubitGate,),
{'gate_name': name, 'gate_name_latex': latexname})
def CreateCGate(name, latexname=None):
"""Use a lexical closure to make a controlled gate.
"""
if not latexname:
latexname = name
onequbitgate = CreateOneQubitGate(name, latexname)
def ControlledGate(ctrls,target):
return CGate(tuple(ctrls),onequbitgate(target))
return ControlledGate
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/backends/backend_tkagg.py | 10 | 40267 | # Todd Miller [email protected]
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import tkinter as Tk
from six.moves import tkinter_filedialog as FileDialog
import os, sys, math
import os.path
# Paint image to Tk photo blitter extension
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.backends.windowing as windowing
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import RendererBase, GraphicsContextBase
from matplotlib.backend_bases import FigureManagerBase, FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2, cursors, TimerBase
from matplotlib.backend_bases import (ShowBase, ToolContainerBase,
StatusbarBase)
from matplotlib.backend_managers import ToolManager
from matplotlib import backend_tools
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
import matplotlib.cbook as cbook
rcParams = matplotlib.rcParams
verbose = matplotlib.verbose
backend_version = Tk.TkVersion
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord = {
cursors.MOVE: "fleur",
cursors.HAND: "hand2",
cursors.POINTER: "arrow",
cursors.SELECT_REGION: "tcross",
}
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
def error_msg_tkpaint(msg, parent=None):
from six.moves import tkinter_messagebox as tkMessageBox
tkMessageBox.showerror("matplotlib", msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.show()
class Show(ShowBase):
def mainloop(self):
Tk.mainloop()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
_focus = windowing.FocusManager()
window = Tk.Tk()
window.withdraw()
if Tk.TkVersion >= 8.5:
        # put an mpl icon on the window rather than the default tk icon. Tkinter
        # doesn't allow colour icons on linux systems, but tk >=8.5 has an iconphoto
# command which we call directly. Source:
# http://mail.python.org/pipermail/tkinter-discuss/2006-November/000954.html
icon_fname = os.path.join(rcParams['datapath'], 'images', 'matplotlib.ppm')
icon_img = Tk.PhotoImage(file=icon_fname)
try:
window.tk.call('wm', 'iconphoto', window._w, icon_img)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# log the failure, but carry on
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
canvas = FigureCanvasTkAgg(figure, master=window)
figManager = FigureManagerTkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
canvas.draw_idle()
return figManager
class TimerTk(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Tk's timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
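    # Usage sketch (hypothetical): timers are normally created through the
    # canvas rather than instantiated directly, e.g.
    #
    #     timer = canvas.new_timer(interval=500)
    #     timer.add_callback(some_update_func)
    #     timer.start()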
def __init__(self, parent, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
self.parent = parent
self._timer = None
def _timer_start(self):
self._timer_stop()
self._timer = self.parent.after(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
self.parent.after_cancel(self._timer)
self._timer = None
def _on_timer(self):
TimerBase._on_timer(self)
# Tk after() is only a single shot, so we need to add code here to
# reset the timer if we're not operating in single shot mode.
if not self._single and len(self.callbacks) > 0:
self._timer = self.parent.after(self._interval, self._on_timer)
else:
self._timer = None
class FigureCanvasTkAgg(FigureCanvasAgg):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65515 : 'super',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
_keycode_lookup = {
262145: 'control',
524320: 'alt',
524352: 'alt',
1048584: 'super',
1048592: 'super',
131074: 'shift',
131076: 'shift',
}
"""_keycode_lookup is used for badly mapped (i.e. no event.key_sym set)
keys on apple keyboards."""
def __init__(self, figure, master=None, resize_callback=None):
FigureCanvasAgg.__init__(self, figure)
self._idle = True
self._idle_callback = None
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self._tkcanvas = Tk.Canvas(
master=master, width=w, height=h, borderwidth=0,
highlightthickness=0)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=w, height=h)
self._tkcanvas.create_image(w//2, h//2, image=self._tkphoto)
self._resize_callback = resize_callback
self._tkcanvas.bind("<Configure>", self.resize)
self._tkcanvas.bind("<Key>", self.key_press)
self._tkcanvas.bind("<Motion>", self.motion_notify_event)
self._tkcanvas.bind("<KeyRelease>", self.key_release)
for name in "<Button-1>", "<Button-2>", "<Button-3>":
self._tkcanvas.bind(name, self.button_press_event)
for name in "<Double-Button-1>", "<Double-Button-2>", "<Double-Button-3>":
self._tkcanvas.bind(name, self.button_dblclick_event)
for name in "<ButtonRelease-1>", "<ButtonRelease-2>", "<ButtonRelease-3>":
self._tkcanvas.bind(name, self.button_release_event)
# Mouse wheel on Linux generates button 4/5 events
for name in "<Button-4>", "<Button-5>":
self._tkcanvas.bind(name, self.scroll_event)
# Mouse wheel for windows goes to the window with the focus.
# Since the canvas won't usually have the focus, bind the
# event to the window containing the canvas instead.
# See http://wiki.tcl.tk/3893 (mousewheel) for details
root = self._tkcanvas.winfo_toplevel()
root.bind("<MouseWheel>", self.scroll_event_windows, "+")
# Can't get destroy events by binding to _tkcanvas. Therefore, bind
# to the window and filter.
def filter_destroy(evt):
if evt.widget is self._tkcanvas:
self.close_event()
root.bind("<Destroy>", filter_destroy, "+")
self._master = master
self._tkcanvas.focus_set()
def resize(self, event):
width, height = event.width, event.height
if self._resize_callback is not None:
self._resize_callback(event)
# compute desired figure size in inches
dpival = self.figure.dpi
winch = width/dpival
hinch = height/dpival
self.figure.set_size_inches(winch, hinch, forward=False)
self._tkcanvas.delete(self._tkphoto)
self._tkphoto = Tk.PhotoImage(
master=self._tkcanvas, width=int(width), height=int(height))
self._tkcanvas.create_image(int(width/2),int(height/2),image=self._tkphoto)
self.resize_event()
self.show()
# a resizing will in general move the pointer position
# relative to the canvas, so process it as a motion notify
# event. An intended side effect of this call is to allow
# window raises (which trigger a resize) to get the cursor
# position to the mpl event framework so key presses which are
# over the axes will work w/o clicks or explicit motion
self._update_pointer_position(event)
def _update_pointer_position(self, guiEvent=None):
"""
Figure out if we are inside the canvas or not and update the
canvas enter/leave events
"""
        # if the pointer is over the canvas, set the lastx and lasty
# attrs of the canvas so it can process event w/o mouse click
# or move
# the window's upper, left coords in screen coords
xw = self._tkcanvas.winfo_rootx()
yw = self._tkcanvas.winfo_rooty()
# the pointer's location in screen coords
xp, yp = self._tkcanvas.winfo_pointerxy()
        # now figure out the canvas coordinates of the pointer
xc = xp - xw
yc = yp - yw
# flip top/bottom
yc = self.figure.bbox.height - yc
# JDH: this method was written originally to get the pointer
# location to the backend lastx and lasty attrs so that events
# like KeyEvent can be handled without mouse events. e.g., if
# the cursor is already above the axes, then key presses like
# 'g' should toggle the grid. In order for this to work in
# backend_bases, the canvas needs to know _lastx and _lasty.
        # There are three ways to get this info to the canvas:
        #
        #    1) set it explicitly
        #
        #    2) call enter/leave events explicitly.  The downside of this
        #       in the impl below is that enter could be repeatedly
        #       triggered if the mouse is over the axes and one is
# resizing with the keyboard. This is not entirely bad,
# because the mouse position relative to the canvas is
# changing, but it may be surprising to get repeated entries
# without leaves
#
# 3) process it as a motion notify event. This also has pros
# and cons. The mouse is moving relative to the window, but
        #       this may surprise an event handler writer who is getting
# motion_notify_events even if the mouse has not moved
# here are the three scenarios
if 1:
# just manually set it
self._lastx, self._lasty = xc, yc
elif 0:
# alternate implementation: process it as a motion
FigureCanvasBase.motion_notify_event(self, xc, yc, guiEvent)
elif 0:
# alternate implementation -- process enter/leave events
# instead of motion/notify
if self.figure.bbox.contains(xc, yc):
self.enter_notify_event(guiEvent, xy=(xc,yc))
else:
self.leave_notify_event(guiEvent)
def draw(self):
FigureCanvasAgg.draw(self)
tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
self._master.update_idletasks()
def blit(self, bbox=None):
tkagg.blit(self._tkphoto, self.renderer._renderer, bbox=bbox, colormode=2)
self._master.update_idletasks()
show = draw
def draw_idle(self):
'update drawing area only if idle'
if self._idle is False:
return
self._idle = False
def idle_draw(*args):
try:
self.draw()
finally:
self._idle = True
self._idle_callback = self._tkcanvas.after_idle(idle_draw)
def get_tk_widget(self):
"""returns the Tk widget used to implement FigureCanvasTkAgg.
Although the initial implementation uses a Tk canvas, this routine
is intended to hide that fact.
"""
return self._tkcanvas
def motion_notify_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def button_press_event(self, event, dblclick=False):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_press_event(self, x, y, num, dblclick=dblclick, guiEvent=event)
def button_dblclick_event(self,event):
self.button_press_event(event,dblclick=True)
def button_release_event(self, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if sys.platform=='darwin':
# 2 and 3 were reversed on the OSX platform I
# tested under tkagg
if num==2: num=3
elif num==3: num=2
FigureCanvasBase.button_release_event(self, x, y, num, guiEvent=event)
def scroll_event(self, event):
x = event.x
y = self.figure.bbox.height - event.y
num = getattr(event, 'num', None)
if num==4: step = +1
elif num==5: step = -1
else: step = 0
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def scroll_event_windows(self, event):
"""MouseWheel event processor"""
# need to find the window that contains the mouse
w = event.widget.winfo_containing(event.x_root, event.y_root)
if w == self._tkcanvas:
x = event.x_root - w.winfo_rootx()
y = event.y_root - w.winfo_rooty()
y = self.figure.bbox.height - y
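            # Windows reports wheel motion in multiples of WHEEL_DELTA (120),
            # so one physical notch maps to a step of +/-1 here.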
step = event.delta/120.
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
def _get_key(self, event):
val = event.keysym_num
if val in self.keyvald:
key = self.keyvald[val]
elif val == 0 and sys.platform == 'darwin' and \
event.keycode in self._keycode_lookup:
key = self._keycode_lookup[event.keycode]
elif val < 256:
key = chr(val)
else:
key = None
# add modifier keys to the key string. Bit details originate from
# http://effbot.org/tkinterbook/tkinter-events-and-bindings.htm
# BIT_SHIFT = 0x001; BIT_CAPSLOCK = 0x002; BIT_CONTROL = 0x004;
# BIT_LEFT_ALT = 0x008; BIT_NUMLOCK = 0x010; BIT_RIGHT_ALT = 0x080;
# BIT_MB_1 = 0x100; BIT_MB_2 = 0x200; BIT_MB_3 = 0x400;
# In general, the modifier key is excluded from the modifier flag,
# however this is not the case on "darwin", so double check that
# we aren't adding repeat modifier flags to a modifier key.
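        # Worked example for the generic (X11) branch below: an event with
        # state bit 2 set (0x4, Control held) and keysym 's' yields the key
        # string 'ctrl+s'; shift is never prefixed because it already changes
        # the keysym itself ('s' -> 'S').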
if sys.platform == 'win32':
modifiers = [(17, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
elif sys.platform == 'darwin':
modifiers = [(3, 'super', 'super'),
(4, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
else:
modifiers = [(6, 'super', 'super'),
(3, 'alt', 'alt'),
(2, 'ctrl', 'control'),
]
if key is not None:
# note, shift is not added to the keys as this is already accounted for
for bitmask, prefix, key_name in modifiers:
if event.state & (1 << bitmask) and key_name not in key:
key = '{0}+{1}'.format(prefix, key)
return key
def key_press(self, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def key_release(self, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerTk(self._tkcanvas, *args, **kwargs)
def flush_events(self):
self._master.update()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerTkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The tk.Toolbar
window : The tk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
self.window = window
self.window.withdraw()
self.set_window_title("Figure %d" % num)
self.canvas = canvas
self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self._num = num
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar()
self.statusbar = None
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarTk(self.window, self.toolmanager)
self._shown = False
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolmanager is not None:
pass
elif self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def _get_toolbar(self):
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2TkAgg(self.canvas, self.window)
elif matplotlib.rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarTk(self.toolmanager, self.window)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
if rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas)
else:
toolmanager = None
return toolmanager
def resize(self, width, height=None):
        # before 09-12-22, the resize method took a single *event*
        # parameter. On the other hand, the resize method of other
        # FigureManager classes takes *width* and *height* parameters,
        # which are used to change the size of the window. For
        # Figure.set_size_inches with forward=True to work with the Tk
        # backend, I changed the function signature but tried to keep
        # it backward compatible. -JJL
# when a single parameter is given, consider it as a event
if height is None:
width = width.width
else:
self.canvas._tkcanvas.master.geometry("%dx%d" % (width, height))
if self.toolbar is not None:
self.toolbar.configure(width=width)
def show(self):
"""
this function doesn't segfault but causes the
PyEval_RestoreThread: NULL state bug on win32
"""
_focus = windowing.FocusManager()
if not self._shown:
def destroy(*args):
self.window = None
Gcf.destroy(self._num)
self.canvas._tkcanvas.bind("<Destroy>", destroy)
self.window.deiconify()
# anim.py requires this
self.window.update()
else:
self.canvas.draw_idle()
# Raise the new window.
self.canvas.manager.window.attributes('-topmost', 1)
self.canvas.manager.window.attributes('-topmost', 0)
self._shown = True
def destroy(self, *args):
if self.window is not None:
#self.toolbar.destroy()
if self.canvas._idle_callback:
self.canvas._tkcanvas.after_cancel(self.canvas._idle_callback)
self.window.destroy()
if Gcf.get_num_fig_managers()==0:
if self.window is not None:
self.window.quit()
self.window = None
def get_window_title(self):
return self.window.wm_title()
def set_window_title(self, title):
self.window.wm_title(title)
def full_screen_toggle(self):
is_fullscreen = bool(self.window.attributes('-fullscreen'))
self.window.attributes('-fullscreen', not is_fullscreen)
class AxisMenu(object):
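    # Legacy helper: builds an "Axes" menubutton with per-axes checkbuttons
    # plus "Select All"/"Invert All" entries, and reports the selected axes
    # back to its master via master.set_active(indices). Its use in
    # NavigationToolbar2TkAgg.update is currently commented out.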
def __init__(self, master, naxes):
self._master = master
self._naxes = naxes
self._mbar = Tk.Frame(master=master, relief=Tk.RAISED, borderwidth=2)
self._mbar.pack(side=Tk.LEFT)
self._mbutton = Tk.Menubutton(
master=self._mbar, text="Axes", underline=0)
self._mbutton.pack(side=Tk.LEFT, padx="2m")
self._mbutton.menu = Tk.Menu(self._mbutton)
self._mbutton.menu.add_command(
label="Select All", command=self.select_all)
self._mbutton.menu.add_command(
label="Invert All", command=self.invert_all)
self._axis_var = []
self._checkbutton = []
for i in range(naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append(self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
self._mbutton.menu.invoke(self._mbutton.menu.index("Select All"))
self._mbutton['menu'] = self._mbutton.menu
self._mbar.tk_menuBar(self._mbutton)
self.set_active()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_var.append(Tk.IntVar())
self._axis_var[i].set(1)
self._checkbutton.append( self._mbutton.menu.add_checkbutton(
label = "Axis %d" % (i+1),
variable=self._axis_var[i],
command=self.set_active))
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
del self._axis_var[i]
self._mbutton.menu.forget(self._checkbutton[i])
del self._checkbutton[i]
self._naxes = naxes
self.set_active()
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].get()]
return a
def set_active(self):
self._master.set_active(self.get_indices())
def invert_all(self):
for a in self._axis_var:
a.set(not a.get())
self.set_active()
def select_all(self):
for a in self._axis_var:
a.set(1)
self.set_active()
class NavigationToolbar2TkAgg(NavigationToolbar2, Tk.Frame):
"""
Public attributes
    canvas - the FigureCanvasTkAgg instance
    window - the Tk window containing the toolbar
"""
def __init__(self, canvas, window):
self.canvas = canvas
self.window = window
self._idle = True
#Tk.Frame.__init__(self, master=self.canvas._tkcanvas)
NavigationToolbar2.__init__(self, canvas)
def destroy(self, *args):
del self.message
Tk.Frame.destroy(self, *args)
def set_message(self, s):
self.message.set(s)
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y0 = height-y0
y1 = height-y1
try: self.lastrect
except AttributeError: pass
else: self.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
#self.canvas.draw()
def release(self, event):
try: self.lastrect
except AttributeError: pass
else:
self.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
def set_cursor(self, cursor):
self.window.configure(cursor=cursord[cursor])
def _Button(self, text, file, command, extension='.gif'):
img_file = os.path.join(rcParams['datapath'], 'images', file + extension)
im = Tk.PhotoImage(master=self, file=img_file)
b = Tk.Button(
master=self, text=text, padx=2, pady=2, image=im, command=command)
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _init_toolbar(self):
xmin, xmax = self.canvas.figure.bbox.intervalx
height, width = 50, xmax-xmin
Tk.Frame.__init__(self, master=self.window,
width=int(width), height=int(height),
borderwidth=2)
self.update() # Make axes menu
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
# spacer, unhandled in Tk
pass
else:
button = self._Button(text=text, file=image_file,
command=getattr(self, callback))
if tooltip_text is not None:
ToolTip.createToolTip(button, tooltip_text)
self.message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self.message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.BOTTOM, fill=Tk.X)
def configure_subplots(self):
toolfig = Figure(figsize=(6,3))
window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=window)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
def save_figure(self, *args):
from six.moves import tkinter_tkfiledialog, tkinter_messagebox
filetypes = self.canvas.get_supported_filetypes().copy()
default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = list(six.iteritems(filetypes))
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH!
#defaultextension = self.canvas.get_default_filetype()
defaultextension = ''
initialdir = rcParams.get('savefig.directory', '')
initialdir = os.path.expanduser(initialdir)
initialfile = self.canvas.get_default_filename()
fname = tkinter_tkfiledialog.asksaveasfilename(
master=self.window,
title='Save the figure',
filetypes=tk_filetypes,
defaultextension=defaultextension,
initialdir=initialdir,
initialfile=initialfile,
)
if fname == "" or fname == ():
return
else:
if initialdir == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = initialdir
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(six.text_type(fname))
try:
# This method will handle the delegation to the correct type
self.canvas.print_figure(fname)
except Exception as e:
tkinter_messagebox.showerror("Error saving file", str(e))
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
_focus = windowing.FocusManager()
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
#if not hasattr(self, "omenu"):
# self.set_active(range(naxes))
# self.omenu = AxisMenu(master=self, naxes=naxes)
#else:
# self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def dynamic_update(self):
'update drawing area only if idle'
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
class ToolTip(object):
"""
Tooltip recipe from
http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml#e387
"""
@staticmethod
def createToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, _, _ = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 27
y = y + self.widget.winfo_rooty()
self.tipwindow = tw = Tk.Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
try:
# For Mac OS
tw.tk.call("::tk::unsupported::MacWindowStyle",
"style", tw._w,
"help", "noActivates")
except Tk.TclError:
pass
label = Tk.Label(tw, text=self.text, justify=Tk.LEFT,
background="#ffffe0", relief=Tk.SOLID, borderwidth=1,
)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
class RubberbandTk(backend_tools.RubberbandBase):
def __init__(self, *args, **kwargs):
backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
def draw_rubberband(self, x0, y0, x1, y1):
height = self.figure.canvas.figure.bbox.height
y0 = height - y0
y1 = height - y1
try:
self.lastrect
except AttributeError:
pass
else:
self.figure.canvas._tkcanvas.delete(self.lastrect)
self.lastrect = self.figure.canvas._tkcanvas.create_rectangle(x0, y0, x1, y1)
def remove_rubberband(self):
try:
self.lastrect
except AttributeError:
pass
else:
self.figure.canvas._tkcanvas.delete(self.lastrect)
del self.lastrect
class SetCursorTk(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
self.figure.canvas.manager.window.configure(cursor=cursord[cursor])
class ToolbarTk(ToolContainerBase, Tk.Frame):
def __init__(self, toolmanager, window):
ToolContainerBase.__init__(self, toolmanager)
xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx
height, width = 50, xmax - xmin
Tk.Frame.__init__(self, master=window,
width=int(width), height=int(height),
borderwidth=2)
self._toolitems = {}
self.pack(side=Tk.TOP, fill=Tk.X)
self._groups = {}
def add_toolitem(self, name, group, position, image_file, description,
toggle):
frame = self._get_groupframe(group)
button = self._Button(name, image_file, toggle, frame)
if description is not None:
ToolTip.createToolTip(button, description)
self._toolitems.setdefault(name, [])
self._toolitems[name].append(button)
def _get_groupframe(self, group):
if group not in self._groups:
if self._groups:
self._add_separator()
frame = Tk.Frame(master=self, borderwidth=0)
frame.pack(side=Tk.LEFT, fill=Tk.Y)
self._groups[group] = frame
return self._groups[group]
def _add_separator(self):
separator = Tk.Frame(master=self, bd=5, width=1, bg='black')
separator.pack(side=Tk.LEFT, fill=Tk.Y, padx=2)
def _Button(self, text, image_file, toggle, frame):
if image_file is not None:
im = Tk.PhotoImage(master=self, file=image_file)
else:
im = None
if not toggle:
b = Tk.Button(master=frame, text=text, padx=2, pady=2, image=im,
command=lambda: self._button_click(text))
else:
b = Tk.Checkbutton(master=frame, text=text, padx=2, pady=2,
image=im, indicatoron=False,
command=lambda: self._button_click(text))
b._ntimage = im
b.pack(side=Tk.LEFT)
return b
def _button_click(self, name):
self.trigger_tool(name)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for toolitem in self._toolitems[name]:
if toggled:
toolitem.select()
else:
toolitem.deselect()
def remove_toolitem(self, name):
for toolitem in self._toolitems[name]:
toolitem.pack_forget()
del self._toolitems[name]
class StatusbarTk(StatusbarBase, Tk.Frame):
def __init__(self, window, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
xmin, xmax = self.toolmanager.canvas.figure.bbox.intervalx
height, width = 50, xmax - xmin
Tk.Frame.__init__(self, master=window,
width=int(width), height=int(height),
borderwidth=2)
self._message = Tk.StringVar(master=self)
self._message_label = Tk.Label(master=self, textvariable=self._message)
self._message_label.pack(side=Tk.RIGHT)
self.pack(side=Tk.TOP, fill=Tk.X)
def set_message(self, s):
self._message.set(s)
class SaveFigureTk(backend_tools.SaveFigureBase):
def trigger(self, *args):
from six.moves import tkinter_tkfiledialog, tkinter_messagebox
filetypes = self.figure.canvas.get_supported_filetypes().copy()
default_filetype = self.figure.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
default_filetype_name = filetypes[default_filetype]
del filetypes[default_filetype]
sorted_filetypes = list(six.iteritems(filetypes))
sorted_filetypes.sort()
sorted_filetypes.insert(0, (default_filetype, default_filetype_name))
tk_filetypes = [
(name, '*.%s' % ext) for (ext, name) in sorted_filetypes]
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH!
# defaultextension = self.figure.canvas.get_default_filetype()
defaultextension = ''
initialdir = rcParams.get('savefig.directory', '')
initialdir = os.path.expanduser(initialdir)
initialfile = self.figure.canvas.get_default_filename()
fname = tkinter_tkfiledialog.asksaveasfilename(
master=self.figure.canvas.manager.window,
title='Save the figure',
filetypes=tk_filetypes,
defaultextension=defaultextension,
initialdir=initialdir,
initialfile=initialfile,
)
if fname == "" or fname == ():
return
else:
if initialdir == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = initialdir
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(
six.text_type(fname))
try:
# This method will handle the delegation to the correct type
self.figure.canvas.print_figure(fname)
except Exception as e:
tkinter_messagebox.showerror("Error saving file", str(e))
class ConfigureSubplotsTk(backend_tools.ConfigureSubplotsBase):
def __init__(self, *args, **kwargs):
backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
self.window = None
def trigger(self, *args):
self.init_window()
self.window.lift()
def init_window(self):
if self.window:
return
toolfig = Figure(figsize=(6, 3))
self.window = Tk.Tk()
canvas = FigureCanvasTkAgg(toolfig, master=self.window)
toolfig.subplots_adjust(top=0.9)
_tool = SubplotTool(self.figure, toolfig)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
self.window.protocol("WM_DELETE_WINDOW", self.destroy)
def destroy(self, *args, **kwargs):
self.window.destroy()
self.window = None
backend_tools.ToolSaveFigure = SaveFigureTk
backend_tools.ToolConfigureSubplots = ConfigureSubplotsTk
backend_tools.ToolSetCursor = SetCursorTk
backend_tools.ToolRubberband = RubberbandTk
Toolbar = ToolbarTk
FigureCanvas = FigureCanvasTkAgg
FigureManager = FigureManagerTkAgg
| gpl-3.0 |
demiangomez/Parallel.GAMIT | classes/pyETM.py | 1 | 109441 | # -*- coding: utf-8 -*-
"""
Project: Parallel.Archive
Date: 3/3/17 11:27 AM
Author: Demian D. Gomez
"""
import numpy as np
import pyStationInfo
import pyDate
from numpy import sin
from numpy import cos
from numpy import pi
from scipy.stats import chi2
import pyEvents
from zlib import crc32
from Utils import ct2lg
from Utils import lg2ct
from Utils import rotlg2ct
from os.path import getmtime
from itertools import repeat
from pyBunch import Bunch
from pprint import pprint
import traceback
import warnings
import sys
import os
from time import time
from matplotlib.widgets import Button
import pg
import matplotlib
from io import BytesIO
import base64
import logging
from logging import INFO, ERROR, WARNING, DEBUG, StreamHandler, Formatter
if 'DISPLAY' in os.environ.keys():
if not os.environ['DISPLAY']:
matplotlib.use('Agg')
else:
matplotlib.use('Agg')
language = {
'eng': {
"station": "Station",
"north": "North",
"east": "East",
"up": "Up",
"table_title": "Year Day Relx [mm] Mag",
"periodic": "Periodic amp",
"velocity": "Velocity",
"from_model": "from model",
"acceleration": "Acceleration",
"position": "Ref. Position",
"completion": "Completion",
"other": "other polynomial terms",
"not_enough": "Not enough solutions to fit an ETM.",
"table_too_long": "Table too long to print!",
"frequency": "Frequency",
"N residuals": "N Residuals",
"E residuals": "E Residuals",
"U residuals": "U Residuals",
"histogram plot": "Histogram",
"residual plot": "Residual Plot"
},
'spa': {
"station": u"Estación",
"north": u"Norte",
"east": u"Este",
"up": u"Arriba",
"table_title": u"Año Día Relx [mm] Mag",
"periodic": u"Amp. Periódica",
"velocity": u"Velocidad",
"from_model": u"de modelo",
"acceleration": u"Aceleración",
"position": u"Posición de ref.",
"completion": u"Completitud",
"other": u"otros términos polinómicos",
"not_enough": u"No hay suficientes soluciones para ajustar trayectorias.",
"table_too_long": u"Tabla demasiado larga!",
"frequency": u"Frecuencia",
"N residuals": u"Residuos N",
"E residuals": u"Residuos E",
"U residuals": u"Residuos U",
"histogram plot": u"Histograma",
"residual plot": u"Gráfico de Residuos"
}}
defined = 'LANG' in globals()
if not defined:
LANG = 'eng'
# logger information and setup
logger = logging.getLogger('pyETM')
stream = StreamHandler()
stream.setFormatter(Formatter(' -- %(message)s'))
logger.addHandler(stream)
def tic():
global tt
tt = time()
def toc(text):
global tt
print text + ': ' + str(time() - tt)
LIMIT = 2.5
type_dict = {-1: 'UNDETERMINED',
1: 'GENERIC_JUMP',
2: 'ANTENNA_CHANGE',
5: 'REFERENCE_FRAME_JUMP',
10: 'CO_SEISMIC_JUMP_DECAY',
15: 'CO_SEISMIC_JUMP',
20: 'CO_SEISMIC_DECAY'}
# unknown jump
UNDETERMINED = -1
# no effect: display purposes
GENERIC_JUMP = 1
# antenna change jump
ANTENNA_CHANGE = 2
# reference frame jump
REFERENCE_FRAME_JUMP = 5
# co-seismic jump and decay
CO_SEISMIC_JUMP_DECAY = 10
# co-seismic jump only, no decay
CO_SEISMIC_JUMP = 15
# co-seismic decay only
CO_SEISMIC_DECAY = 20
EQ_MIN_DAYS = 15
JP_MIN_DAYS = 5
DEFAULT_RELAXATION = np.array([0.5])
DEFAULT_POL_TERMS = 2
DEFAULT_FREQUENCIES = np.array((1/365.25, 1/(365.25/2))) # (1 yr, 6 months) expressed in 1/days (one year = 365.25)
SIGMA_FLOOR_H = 0.10
SIGMA_FLOOR_V = 0.15
ESTIMATION = 0
DATABASE = 1
VERSION = '1.2.1'
class pyETMException(Exception):
def __init__(self, value):
self.value = value
self.event = pyEvents.Event(Description=value, EventType='error')
def __str__(self):
return str(self.value)
class pyETMException_NoDesignMatrix(pyETMException):
pass
def distance(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1 = lon1*pi/180
lat1 = lat1*pi/180
lon2 = lon2*pi/180
lat2 = lat2*pi/180
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
c = 2 * np.arcsin(np.sqrt(a))
km = 6371 * c
return km
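# quick sanity check for the haversine above: distance(0, 0, 0, 1), i.e. two
# points one degree of latitude apart, returns ~111.2 km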
def to_postgres(dictionary):
if isinstance(dictionary, dict):
for key, val in dictionary.items():
if isinstance(val, np.ndarray):
dictionary[key] = str(val.flatten().tolist()).replace('[', '{').replace(']', '}')
else:
dictionary = str(dictionary.flatten().tolist()).replace('[', '{').replace(']', '}')
return dictionary
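# e.g. to_postgres(np.array([1.0, 2.0])) -> '{1.0, 2.0}', the literal syntax
# PostgreSQL expects for array columns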
def to_list(dictionary):
for key, val in dictionary.items():
if isinstance(val, np.ndarray):
dictionary[key] = val.tolist()
if isinstance(val, pyDate.datetime):
dictionary[key] = val.strftime('%Y-%m-%d %H:%M:%S')
return dictionary
class PppSoln(object):
""""class to extract the PPP solutions from the database"""
def __init__(self, cnn, NetworkCode, StationCode):
self.NetworkCode = NetworkCode
self.StationCode = StationCode
self.hash = 0
self.type = 'ppp'
self.stack_name = 'ppp'
# get the station from the stations table
stn = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode))
stn = stn.dictresult()[0]
if stn['lat'] is not None:
self.lat = np.array([float(stn['lat'])])
self.lon = np.array([float(stn['lon'])])
self.height = np.array([float(stn['height'])])
self.auto_x = np.array([float(stn['auto_x'])])
self.auto_y = np.array([float(stn['auto_y'])])
self.auto_z = np.array([float(stn['auto_z'])])
x = np.array([float(stn['auto_x'])])
y = np.array([float(stn['auto_y'])])
z = np.array([float(stn['auto_z'])])
if stn['max_dist'] is not None:
self.max_dist = stn['max_dist']
else:
self.max_dist = 20
# load all the PPP coordinates available for this station
# exclude ppp solutions in the exclude table and any solution that is more than 20 meters from the simple
# linear trend calculated above
self.excluded = cnn.query_float('SELECT "Year", "DOY" FROM ppp_soln_excl '
'WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode))
self.table = cnn.query_float(
'SELECT "X", "Y", "Z", "Year", "DOY" FROM ppp_soln p1 '
'WHERE p1."NetworkCode" = \'%s\' AND p1."StationCode" = \'%s\' ORDER BY "Year", "DOY"'
% (NetworkCode, StationCode))
self.table = [item for item in self.table
if np.sqrt(np.square(item[0] - x) + np.square(item[1] - y) + np.square(item[2] - z)) <=
self.max_dist and item[3:] not in self.excluded]
self.blunders = [item for item in self.table
if np.sqrt(np.square(item[0] - x) + np.square(item[1] - y) + np.square(item[2] - z)) >
self.max_dist and item[3:] not in self.excluded]
self.solutions = len(self.table)
self.ts_blu = np.array([pyDate.Date(year=item[3], doy=item[4]).fyear for item in self.blunders])
if self.solutions >= 1:
a = np.array(self.table)
self.x = a[:, 0]
self.y = a[:, 1]
self.z = a[:, 2]
self.t = np.array([pyDate.Date(year=item[0], doy=item[1]).fyear for item in a[:, 3:5]])
self.mjd = np.array([pyDate.Date(year=item[0], doy=item[1]).mjd for item in a[:, 3:5]])
self.date = [pyDate.Date(year=item[0], doy=item[1]) for item in a[:, 3:5]]
# continuous time vector for plots
ts = np.arange(np.min(self.mjd), np.max(self.mjd) + 1, 1)
self.mjds = ts
self.ts = np.array([pyDate.Date(mjd=tts).fyear for tts in ts])
else:
if len(self.blunders) >= 1:
raise pyETMException('No viable PPP solutions available for %s.%s (all blunders!)\n'
' -> min distance to station coordinate is %.1f meters'
% (NetworkCode, StationCode, np.array([item[5]
for item in self.blunders]).min()))
else:
raise pyETMException('No PPP solutions available for %s.%s' % (NetworkCode, StationCode))
# get a list of the epochs with files but no solutions.
# This will be shown in the outliers plot as a special marker
rnx = cnn.query(
'SELECT r."ObservationFYear" FROM rinex_proc as r '
'LEFT JOIN ppp_soln as p ON '
'r."NetworkCode" = p."NetworkCode" AND '
'r."StationCode" = p."StationCode" AND '
'r."ObservationYear" = p."Year" AND '
'r."ObservationDOY" = p."DOY"'
'WHERE r."NetworkCode" = \'%s\' AND r."StationCode" = \'%s\' AND '
'p."NetworkCode" IS NULL' % (NetworkCode, StationCode))
self.rnx_no_ppp = rnx.getresult()
self.ts_ns = np.array([item for item in self.rnx_no_ppp])
self.completion = 100. - float(len(self.ts_ns)) / float(len(self.ts_ns) + len(self.t)) * 100.
ppp_hash = cnn.query_float('SELECT sum(hash) FROM ppp_soln p1 '
'WHERE p1."NetworkCode" = \'%s\' AND p1."StationCode" = \'%s\''
% (NetworkCode, StationCode))
self.hash = crc32(str(len(self.t) + len(self.blunders)) + ' ' + str(self.auto_x) + str(self.auto_y) +
str(self.auto_z) + str(ts[0]) + ' ' + str(ts[-1]) + ' ' + str(ppp_hash[0][0]) + VERSION)
else:
raise pyETMException('Station %s.%s has no valid metadata in the stations table.'
% (NetworkCode, StationCode))
class GamitSoln(object):
""""class to extract the GAMIT polyhedrons from the database"""
def __init__(self, cnn, polyhedrons, NetworkCode, StationCode, stack_name):
self.NetworkCode = NetworkCode
self.StationCode = StationCode
self.stack_name = stack_name
self.hash = 0
self.type = 'gamit'
# get the station from the stations table
stn = cnn.query_float('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode), as_dict=True)[0]
if stn['lat'] is not None:
self.lat = np.array([float(stn['lat'])])
self.lon = np.array([float(stn['lon'])])
self.height = np.array([stn['height']])
self.auto_x = np.array([float(stn['auto_x'])])
self.auto_y = np.array([float(stn['auto_y'])])
self.auto_z = np.array([float(stn['auto_z'])])
if stn['max_dist'] is not None:
self.max_dist = stn['max_dist']
else:
self.max_dist = 20
self.solutions = len(polyhedrons)
# blunders
self.blunders = []
self.ts_blu = np.array([])
if self.solutions >= 1:
a = np.array(polyhedrons, dtype=float)
if np.sqrt(np.square(np.sum(np.square(a[0, 0:3])))) > 6.3e3:
# coordinates given in XYZ
nb = np.sqrt(np.square(np.sum(
np.square(a[:, 0:3] - np.array([stn['auto_x'], stn['auto_y'], stn['auto_z']])), axis=1))) \
<= self.max_dist
else:
# coordinates are differences
nb = np.sqrt(np.square(np.sum(np.square(a[:, 0:3]), axis=1))) <= self.max_dist
if np.any(nb):
self.x = a[nb, 0]
self.y = a[nb, 1]
self.z = a[nb, 2]
self.t = np.array([pyDate.Date(year=item[0], doy=item[1]).fyear for item in a[nb, 3:5]])
self.mjd = np.array([pyDate.Date(year=item[0], doy=item[1]).mjd for item in a[nb, 3:5]])
self.date = [pyDate.Date(year=item[0], doy=item[1]) for item in a[nb, 3:5]]
# continuous time vector for plots
ts = np.arange(np.min(self.mjd), np.max(self.mjd) + 1, 1)
self.mjds = ts
self.ts = np.array([pyDate.Date(mjd=tts).fyear for tts in ts])
else:
dd = np.sqrt(np.square(np.sum(
np.square(a[:, 0:3] - np.array([stn['auto_x'], stn['auto_y'], stn['auto_z']])), axis=1)))
raise pyETMException('No viable GAMIT solutions available for %s.%s (all blunders!)\n'
' -> min distance to station coordinate is %.1f meters'
% (NetworkCode, StationCode, dd.min()))
else:
raise pyETMException('No GAMIT polyhedrons vertices available for %s.%s' % (NetworkCode, StationCode))
# get a list of the epochs with files but no solutions.
# This will be shown in the outliers plot as a special marker
rnx = cnn.query(
'SELECT r.* FROM rinex_proc as r '
'LEFT JOIN stacks as p ON '
'r."NetworkCode" = p."NetworkCode" AND '
'r."StationCode" = p."StationCode" AND '
'r."ObservationYear" = p."Year" AND '
'r."ObservationDOY" = p."DOY" AND '
'p."name" = \'%s\''
'WHERE r."NetworkCode" = \'%s\' AND r."StationCode" = \'%s\' AND '
'p."NetworkCode" IS NULL' % (stack_name, NetworkCode, StationCode))
self.rnx_no_ppp = rnx.dictresult()
self.ts_ns = np.array([float(item['ObservationFYear']) for item in self.rnx_no_ppp])
self.completion = 100. - float(len(self.ts_ns)) / float(len(self.ts_ns) + len(self.t)) * 100.
self.hash = crc32(str(len(self.t) + len(self.blunders)) + ' ' + str(ts[0]) + ' ' + str(ts[-1]) + VERSION)
else:
raise pyETMException('Station %s.%s has no valid metadata in the stations table.'
% (NetworkCode, StationCode))
class ListSoln(GamitSoln):
""""class to extract the polyhedrons from a list"""
def __init__(self, cnn, polyhedrons, NetworkCode, StationCode, stack_name='file-unknown'):
super(ListSoln, self).__init__(cnn=cnn, polyhedrons=polyhedrons, NetworkCode=NetworkCode,
StationCode=StationCode, stack_name=stack_name)
self.rnx_no_ppp = []
class JumpTable:
def __init__(self, cnn, NetworkCode, StationCode, soln, t, FitEarthquakes=True, FitGenericJumps=True):
self.table = []
# get earthquakes for this station
self.earthquakes = Earthquakes(cnn, NetworkCode, StationCode, soln, t, FitEarthquakes)
self.generic_jumps = GenericJumps(cnn, NetworkCode, StationCode, soln, t, FitGenericJumps)
jumps = self.earthquakes.table + self.generic_jumps.table
jumps.sort()
# add the relevant jumps, make sure none are incompatible
for jump in jumps:
self.insert_jump(jump)
# verify last jump to make sure there's enough data
if len(self.table) > 0:
jump = None
# find last active jump
for j in self.table[-1::-1]:
# find the previous active jump
if j.fit:
jump = j
break
if jump:
dt = np.max(t[jump.design[:, -1] != 0]) - np.min(t[jump.design[:, -1] != 0])
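                # if the data span is under a year and fewer than ~6 months of
                # actual observations follow the event, the log decay cannot be
                # separated from the offset: downgrade to a plain co-seismic jump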
if (jump.p.jump_type == CO_SEISMIC_JUMP_DECAY and
(dt < 1 and np.count_nonzero(jump.design[:, -1]) / 365.25 < 0.5)):
# was a jump and decay, leave the jump
jump.p.jump_type = CO_SEISMIC_JUMP
jump.param_count -= jump.nr # subtract from param count the number of relaxations
jump.p.params = np.zeros((3, 1))
jump.p.sigmas = np.zeros((3, 1))
# reevaluate the design matrix!
jump.design = jump.eval(t)
jump.rehash()
# for j in self.table:
# print j
self.constrains = np.array([])
def param_count(self):
return sum([jump.param_count for jump in self.table if jump.fit])
def insert_jump(self, jump):
if len(self.table) == 0:
self.table.append(jump)
else:
# take last jump and compare to adding jump
jj = None
for j in self.table[-1::-1]:
# find the previous active jump
if j.fit:
jj = j
break
if not jj:
# no active jumps in the table!
self.table.append(jump)
return
if jump.fit:
# this operation determines if jumps are equivalent
# result is true if equivalent, decision is which one survives
result, decision = jj.__eq__(jump)
if result:
# jumps are equivalent
# decision branches:
# 1) decision == jump, remove previous; add jump
# 2) decision == jj , do not add jump (i.e. do nothing)
if decision is jump:
jj.remove_from_fit()
else:
jump.remove_from_fit()
self.table.append(jump)
def get_design_ts(self, t):
# if function call NOT for inversion, return the columns even if the design matrix is unstable
A = np.array([])
# get the design matrix for the jump table
for jump in self.table:
if jump.fit:
a = jump.eval(t)
if a.size:
if A.size:
# if A is not empty, verify that this jump will not make the matrix singular
tA = np.column_stack((A, a))
# getting the condition number might trigger a divide-by-zero warning => turn it off
np.seterr(divide='ignore', invalid='ignore')
if np.linalg.cond(tA) < 1e10:
# adding this jump doesn't make the matrix singular
A = tA
else:
# if matrix becomes singular, remove from fit!
jump.remove_from_fit()
warnings.warn('%s had to be removed due to high condition number' % str(jump))
else:
A = a
return A
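# Minimal sketch (not part of the module) of the condition-number guard above, using only numpy;
# the 1e10 threshold is the one hard-coded in get_design_ts:
#   import numpy as np
#   A = np.ones((100, 1))                        # an existing design column
#   a = np.ones((100, 1))                        # a candidate column identical to it
#   np.linalg.cond(np.column_stack((A, a)))      # enormous (>> 1e10) -> the jump would be dropped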
def load_parameters(self, params, sigmas):
for jump in self.table:
if jump.fit:
jump.load_parameters(params=params, sigmas=sigmas)
def print_parameters(self):
output_n = [language[LANG]['table_title']]
output_e = [language[LANG]['table_title']]
output_u = [language[LANG]['table_title']]
for jump in self.table:
# relaxation counter
rx = 0
m = ' -' if np.isnan(jump.magnitude) else jump.magnitude
if jump.fit:
for j, p in enumerate(np.arange(jump.param_count)):
psc = jump.p.params[:, p]
if j == 0 and jump.p.jump_type != CO_SEISMIC_DECAY:
output_n.append('{} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), psc[0] * 1000.0,
m, jump.action))
output_e.append('{} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), psc[1] * 1000.0,
m, jump.action))
output_u.append('{} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), psc[2] * 1000.0,
m, jump.action))
else:
output_n.append('{} {:4.2f} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
psc[0] * 1000.0, m, jump.action))
output_e.append('{} {:4.2f} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
psc[1] * 1000.0, m, jump.action))
output_u.append('{} {:4.2f} {:>7.1f} {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
psc[2] * 1000.0, m, jump.action))
# relaxation counter
rx += 1
else:
for j, _ in enumerate(np.arange(jump.param_count)):
if j == 0 and jump.p.jump_type != CO_SEISMIC_DECAY:
# the only type of jump that does not show the jump is a co-seismic decay
output_n.append('{} - {} {}'.format(jump.date.yyyyddd(), m, jump.action))
output_e.append('{} - {} {}'.format(jump.date.yyyyddd(), m, jump.action))
output_u.append('{} - {} {}'.format(jump.date.yyyyddd(), m, jump.action))
else:
output_n.append('{} {:4.2f} - {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
m, jump.action))
output_e.append('{} {:4.2f} - {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
m, jump.action))
output_u.append('{} {:4.2f} - {} {}'.format(jump.date.yyyyddd(), jump.p.relaxation[rx],
m, jump.action))
# relaxation counter
rx += 1
if len(output_n) > 22:
output_n = output_n[0:22] + [language[LANG]['table_too_long']]
output_e = output_e[0:22] + [language[LANG]['table_too_long']]
output_u = output_u[0:22] + [language[LANG]['table_too_long']]
return '\n'.join(output_n), '\n'.join(output_e), '\n'.join(output_u)
class EtmFunction(object):
def __init__(self, **kwargs):
self.p = Bunch()
self.p.NetworkCode = kwargs['NetworkCode']
self.p.StationCode = kwargs['StationCode']
self.p.soln = kwargs['soln'].type
self.p.stack = kwargs['soln'].stack_name
self.p.params = np.array([])
self.p.sigmas = np.array([])
self.p.object = ''
self.p.metadata = None
self.p.hash = 0
self.param_count = 0
self.column_index = np.array([])
self.format_str = ''
self.fit = True
def load_parameters(self, **kwargs):
params = kwargs['params']
sigmas = kwargs['sigmas']
if params.ndim == 1:
# parameters coming from the database, reshape
params = params.reshape((3, params.shape[0] / 3))
if sigmas.ndim == 1:
# parameters coming from the database, reshape
sigmas = sigmas.reshape((3, sigmas.shape[0] / 3))
# determine if parameters are coming from the X vector (LSQ) or from the database (solution for self only)
if params.shape[1] > self.param_count:
# X vector
self.p.params = params[:, self.column_index]
self.p.sigmas = sigmas[:, self.column_index]
else:
# database (solution for self only; no need for column_index)
self.p.params = params
self.p.sigmas = sigmas
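# Illustrative sketch (not part of the module): how a flat parameter vector coming from the
# database is rearranged into one row per component, assuming the three components are stored
# contiguously as the reshape above implies:
#   import numpy as np
#   flat = np.arange(6.)              # e.g. 3 components x 2 parameters stored in the etms table
#   flat.reshape((3, 2))              # row 0 -> N, row 1 -> E, row 2 -> U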
class Jump(EtmFunction):
"""
generic jump (mechanic jump, frame change, etc) class
:argument NetworkCode
:argument StationCode
"""
def __init__(self, NetworkCode, StationCode, soln, t, date, metadata, dtype=GENERIC_JUMP, action='A', fit=True):
super(Jump, self).__init__(NetworkCode=NetworkCode, StationCode=StationCode, soln=soln)
# in the future, can load parameters from the db
self.p.object = 'jump'
# define initial state variables
self.date = date
self.p.jump_date = date.datetime()
self.p.metadata = metadata
self.p.jump_type = dtype
# new property to identify manually added (or removed) jumps
self.action = action
# new property indicating if jump should be adjusted or not
self.fit = fit
# add the magnitude property to allow transformation from CO_SEISMIC_JUMP_DECAY to CO_SEISMIC_JUMP and still
# print the magnitude of the event in the jump table
self.magnitude = np.nan
# the param count of a jump is one!
self.param_count = 1
if self.fit:
# evaluate only if the jump is not flagged as NO EFFECT
self.design = Jump.eval(self, t)
else:
self.design = np.array([])
if not np.any(self.design) or np.all(self.design):
# a valid jump has only some rows == 1 in its design column;
# a column of all ones (or all zeros) produces a singular matrix
self.design = np.array([])
self.fit = False
if dtype not in (CO_SEISMIC_JUMP, CO_SEISMIC_DECAY, CO_SEISMIC_JUMP_DECAY):
logger.info('Mechanical Jump -> Adding jump on %s type: %s; Action: %s; Fit: %s'
% (self.date.yyyyddd(), type_dict[dtype], action, str(self.fit)))
Jump.rehash(self)
def rehash(self):
self.p.hash = crc32(str(self.date) + str(self.fit) + VERSION)
def remove_from_fit(self):
# this method will make this jump type = NO_EFFECT and adjust its params
self.fit = False
self.design = np.array([])
self.rehash()
def eval(self, t):
# given a time vector t, return the design matrix column vector(s)
if not self.fit:
return np.array([])
ht = np.zeros((t.shape[0], 1))
ht[t > self.date.fyear] = 1.
return ht
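# Minimal sketch (not part of the module): the single design column produced by Jump.eval is a
# Heaviside step at the jump epoch. Names below (t_synth, t_jump) are hypothetical:
#   import numpy as np
#   t_synth = np.arange(2010.0, 2012.0, 1 / 365.25)   # fractional years
#   t_jump = 2011.0
#   ht = np.zeros((t_synth.shape[0], 1))
#   ht[t_synth > t_jump] = 1.                          # 0 before the event, 1 after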
def load_parameters(self, **kwargs):
if self.fit:
EtmFunction.load_parameters(self, **kwargs)
def __eq__(self, jump):
if not isinstance(jump, Jump):
raise pyETMException('type: ' + str(type(jump)) + ' invalid. Can only compare two Jump objects')
if not self.fit and jump.fit:
# if comparing to a self that has NO_EFFECT, remove and keep jump
return True, jump
elif self.fit and not jump.fit:
# if comparing against a jump that has NO_EFFECT, remove jump keep self
return True, self
elif not self.fit and not jump.fit:
# no jump has an effect, return None. This will be interpreted as False (if not result)
return None, None
# if we got here, then both jumps have fit == True
# compare two jumps together and make sure they will not generate a singular (or near singular) system of eq
c = np.sum(np.logical_xor(self.design[:, 0], jump.design[:, 0]))
dt = jump.date - self.date
# print ' ', jump.date, self.date, dt, c
if self.p.jump_type >= 10 and jump.p.jump_type >= 10:
# jump type > 10 => co-seismic jump
# if self is a co-seismic jump and the next jump is also co-seismic
# and there is enough data between them to constrain both sets of parameters, return False (not equal)
# otherwise, keep only the event with the larger magnitude
if c < self.param_count + 1 or (dt < 365 and c / 365.25 < 0.1):
if self.magnitude < jump.magnitude:
return True, jump
else:
return True, self
else:
return False, None
elif self.p.jump_type >= 10 and 0 < jump.p.jump_type < 10:
if c < self.param_count + 1 or (dt < 365 and c / 365.25 < 0.1):
# can't fit the co-seismic or generic jump AND the generic jump after, remove generic jump
return True, self
else:
return False, None
elif 0 < self.p.jump_type < 10 and jump.p.jump_type >= 10:
if c < self.param_count + 1 or (dt < 365 and c / 365.25 < 0.1):
# if a generic jump comes before an earthquake jump and both cannot be constrained, the co-seismic jump prevails
return True, jump
else:
return False, None
elif 0 < self.p.jump_type < 10 and 0 < jump.p.jump_type < 10:
# two generic jumps. As long as they can be constrained, we are fine
if c < self.param_count + 1 or (dt < 365 and c / 365.25 < 0.1):
return True, jump
else:
return False, None
def __str__(self):
return 'date=' + str(self.date) + ', type=' + type_dict[self.p.jump_type] + ', metadata="' + self.p.metadata + \
'", action="' + str(self.action) + '", fit=' + str(self.fit)
def __repr__(self):
return 'pyPPPETM.Jump(' + str(self) + ')'
def __lt__(self, jump):
if not isinstance(jump, Jump):
raise pyETMException('type: '+str(type(jump))+' invalid. Can only compare Jump objects')
return self.date.fyear < jump.date.fyear
def __le__(self, jump):
if not isinstance(jump, Jump):
raise pyETMException('type: '+str(type(jump))+' invalid. Can only compare Jump objects')
return self.date.fyear <= jump.date.fyear
def __gt__(self, jump):
if not isinstance(jump, Jump):
raise pyETMException('type: '+str(type(jump))+' invalid. Can only compare Jump objects')
return self.date.fyear > jump.date.fyear
def __ge__(self, jump):
if not isinstance(jump, Jump):
raise pyETMException('type: '+str(type(jump))+' invalid. Can only compare Jump objects')
return self.date.fyear >= jump.date.fyear
def __hash__(self):
# to make the object hashable
return hash(self.date.fyear)
class CoSeisJump(Jump):
def __init__(self, NetworkCode, StationCode, soln, t, date, relaxation, metadata,
dtype=CO_SEISMIC_JUMP_DECAY, magnitude=0., action='A', fit=True):
# super-class initialization
Jump.__init__(self, NetworkCode, StationCode, soln, t, date, metadata, dtype, action, fit)
# if t.min() > date, change to CO_SEISMIC_DECAY
# if jump / decay manually deactivated, fit == False and it's not changed below
if date.fyear < t.min():
self.p.jump_type = CO_SEISMIC_DECAY
else:
self.p.jump_type = dtype
# new feature informs the magnitude of the event in the plot
self.magnitude = magnitude
if not self.fit and fit:
# came back from init with empty design matrix (fit = false) and originally fit was True.
# May be a jump before t.min()
# assign just the decay
self.p.jump_type = CO_SEISMIC_DECAY
# put fit back to original state
self.fit = fit
# if relaxation is an array, it contains the corresponding decays
# otherwise, it is a single decay
if not isinstance(relaxation, np.ndarray):
relaxation = np.array([relaxation])
self.param_count += relaxation.shape[0]
if self.p.jump_type == CO_SEISMIC_DECAY:
# if CO_SEISMIC_DECAY, subtract one from parameters
self.param_count -= 1
self.nr = relaxation.shape[0]
self.p.relaxation = relaxation
if self.fit:
self.design = self.eval(t)
else:
self.design = np.array([])
logger.info('Geophysical Jump -> Adding jump on %s type: %s; Mag: %.1f; Action: %s; Fit: %s'
% (self.date.yyyyddd(), type_dict[dtype], magnitude, action, str(self.fit)))
self.rehash()
def rehash(self):
# co-seismic jump already has the version hash value from Jump object
self.p.hash = crc32(str(self.date) + str(self.fit) + str(self.param_count) + str(self.p.jump_type) +
str(self.p.relaxation) + str(self.fit) + VERSION)
def eval(self, t):
ht = Jump.eval(self, t)
# if there is nothing in ht, then there is no expected output: return an empty array
if not np.any(ht):
return np.array([])
# if it was determined that this is just a co-seismic jump (no decay), return ht
if self.p.jump_type == CO_SEISMIC_JUMP:
return ht
# support more than one decay
hl = np.zeros((t.shape[0], self.nr))
for i, T in enumerate(self.p.relaxation):
hl[t > self.date.fyear, i] = np.log10(1. + (t[t > self.date.fyear] - self.date.fyear) / T)
# if it's both jump and decay, return ht + hl
if np.any(hl) and self.p.jump_type == CO_SEISMIC_JUMP_DECAY:
return np.column_stack((ht, hl))
# if decay only, return hl
elif np.any(hl) and self.p.jump_type == CO_SEISMIC_DECAY:
return hl
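# Minimal sketch (not part of the module): the post-seismic column(s) built above are logarithmic
# transients, one per relaxation time. Names below are hypothetical:
#   import numpy as np
#   t = np.arange(2010.0, 2013.0, 1 / 365.25)   # fractional years
#   t_eq, T = 2011.0, 0.5                       # event epoch and relaxation time [yr]
#   hl = np.zeros_like(t)
#   hl[t > t_eq] = np.log10(1. + (t[t > t_eq] - t_eq) / T)   # rises quickly, then flattens out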
def __str__(self):
return Jump.__str__(self) + ', relax=' + str(self.p.relaxation)
def __repr__(self):
return 'pyPPPETM.CoSeisJump(' + str(self) + ')'
class Earthquakes:
def __init__(self, cnn, NetworkCode, StationCode, soln, t, FitEarthquakes=True):
self.StationCode = StationCode
self.NetworkCode = NetworkCode
# station location
stn = cnn.query('SELECT * FROM stations WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\''
% (NetworkCode, StationCode))
stn = stn.dictresult()[0]
# load metadata
lat = float(stn['lat'])
lon = float(stn['lon'])
# establish the limit dates. Ignore jumps before 5 years from the earthquake
# sdate = pyDate.Date(fyear=t.min() - 5)
# DDG 30/04/2020: now do not treat the earthquakes before the start date
# the same as those happening after the start date
sdate = pyDate.Date(fyear=t.min())
edate = pyDate.Date(fyear=t.max())
# get the earthquakes based on Mike's expression
# earthquakes before the start date: only magnitude 7+
jumps = cnn.query_float('SELECT * FROM earthquakes '
'WHERE date BETWEEN \'%s\' AND \'%s\' UNION '
'SELECT * FROM earthquakes '
'WHERE date BETWEEN \'%s\' AND \'%s\' AND mag >= 7 '
'ORDER BY date'
% (sdate.yyyymmdd(), edate.yyyymmdd(),
pyDate.Date(fyear=t.min() - 5).yyyymmdd(), sdate.yyyymmdd()), as_dict=True)
# check if data range returned any jumps
if jumps and FitEarthquakes:
eq = [[float(jump['lat']), float(jump['lon']), float(jump['mag']),
int(jump['date'].year), int(jump['date'].month), int(jump['date'].day),
int(jump['date'].hour), int(jump['date'].minute), int(jump['date'].second)] for jump in jumps]
eq = np.array(list(eq))
dist = distance(lon, lat, eq[:, 1], eq[:, 0])
m = -0.8717 * (np.log10(dist) - 2.25) + 0.4901 * (eq[:, 2] - 6.6928)
# build the earthquake jump table
# remove events that happened on the same day
eq_jumps = list(set((float(eqs[2]), pyDate.Date(year=int(eqs[3]), month=int(eqs[4]), day=int(eqs[5]),
hour=int(eqs[6]), minute=int(eqs[7]), second=int(eqs[8])))
for eqs in eq[m > 0, :]))
eq_jumps.sort(key=lambda x: (x[1], -x[0]))
# open the jumps table
jp = cnn.query_float('SELECT * FROM etm_params WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'AND soln = \'%s\' AND jump_type <> 0 AND object = \'jump\''
% (NetworkCode, StationCode, soln.type), as_dict=True)
# start by collapsing all earthquakes that fall within the same EQ_MIN_DAYS window:
# do not allow more than one earthquake per window
f_jumps = []
next_date = None
for mag, date in eq_jumps:
# jumps are analyzed in windows that are EQ_MIN_DAYS long
# a date should not be analyzed if it's < next_date
if next_date is not None:
if date < next_date:
continue
# obtain jumps in a EQ_MIN_DAYS window
jumps = [(m, d) for m, d in eq_jumps if date <= d < date + EQ_MIN_DAYS]
if len(jumps) > 1:
# if more than one jump, get the max magnitude
mmag = max([m for m, _ in jumps])
# only keep the earthquake with the largest magnitude
for m, d in jumps:
table = [j['action'] for j in jp if j['Year'] == d.year and j['DOY'] == d.doy]
# get a different relaxation for this date
relax = [j['relaxation'] for j in jp if j['Year'] == d.year and j['DOY'] == d.doy]
if relax:
if relax[0] is not None:
relaxation = np.array(relax[0])
else:
relaxation = DEFAULT_RELAXATION
else:
relaxation = DEFAULT_RELAXATION
# if present in the jump table, with either + or -, don't use the default decay
if m == mmag and '-' not in table:
f_jumps += [CoSeisJump(NetworkCode, StationCode, soln, t, d, relaxation,
'mag=%.1f' % m, magnitude=m, action='+' if '+' in table else 'A')]
# once the jump was added, exit for loop
break
else:
# add only if in jump list with a '+'
if '+' in table:
f_jumps += [CoSeisJump(NetworkCode, StationCode, soln, t, d,
relaxation, 'mag=%.1f' % m, magnitude=m, action='+')]
# once the jump was added, exit for loop
break
else:
f_jumps += [CoSeisJump(NetworkCode, StationCode, soln, t, d,
relaxation, 'mag=%.1f' % m, action='-', fit=False)]
else:
# add, unless marked in table with '-'
table = [j['action'] for j in jp if j['Year'] == date.year and j['DOY'] == date.doy]
# get a different relaxation for this date
relax = [j['relaxation'] for j in jp if j['Year'] == date.year and j['DOY'] == date.doy]
if relax:
if relax[0] is not None:
relaxation = np.array(relax[0])
else:
relaxation = DEFAULT_RELAXATION
else:
relaxation = DEFAULT_RELAXATION
if '-' not in table:
f_jumps += [CoSeisJump(NetworkCode, StationCode, soln, t, date,
relaxation, 'mag=%.1f' % mag, magnitude=mag,
action='+' if '+' in table else 'A')]
else:
# add it with NO_EFFECT for display purposes
f_jumps += [CoSeisJump(NetworkCode, StationCode, soln, t, date,
relaxation, 'mag=%.1f' % mag, magnitude=mag, action='-', fit=False)]
next_date = date + EQ_MIN_DAYS
# final jump table
self.table = f_jumps
else:
self.table = []
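# Illustrative sketch (not part of the module): the screening expression above keeps an event only
# when m > 0, i.e. when its magnitude is large enough for its epicentral distance (dist as returned
# by the distance() helper, presumably in km):
#   import numpy as np
#   mag, dist = 7.0, 200.0                                      # hypothetical event
#   m = -0.8717 * (np.log10(dist) - 2.25) + 0.4901 * (mag - 6.6928)
#   m > 0                                                       # True: a M7.0 at ~200 km is retained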
class GenericJumps(object):
def __init__(self, cnn, NetworkCode, StationCode, soln, t, FitGenericJumps=True):
self.solution_type = soln.type
self.table = []
if t.size >= 2:
# analyze if it is possible to add the jumps (based on the available data)
wt = np.sort(np.unique(t - np.fix(t)))
# analyze the gaps in the data
dt = np.diff(wt)
# max dt (internal)
dtmax = np.max(dt)
# dt wrapped around
dt_interyr = 1 - wt[-1] + wt[0]
if dt_interyr > dtmax:
dtmax = dt_interyr
if dtmax <= 0.2465 and FitGenericJumps:
# put jumps in
self.add_metadata_jumps = True
else:
# no jumps
self.add_metadata_jumps = False
else:
self.add_metadata_jumps = False
# open the jumps table
jp = cnn.query('SELECT * FROM etm_params WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'AND soln = \'%s\' AND jump_type = 0 AND object = \'jump\''
% (NetworkCode, StationCode, self.solution_type))
jp = jp.dictresult()
# get station information
self.stninfo = pyStationInfo.StationInfo(cnn, NetworkCode, StationCode)
for stninfo in self.stninfo.records[1:]:
date = stninfo['DateStart']
table = [j['action'] for j in jp if j['Year'] == date.year and j['DOY'] == date.doy]
# add to list only if:
# 1) add_meta = True AND there is no '-' OR
# 2) add_meta = False AND there is a '+'
self.table.append(Jump(NetworkCode, StationCode, soln, t, date,
'Ant-Rec: %s-%s' % (stninfo['AntennaCode'], stninfo['ReceiverCode']),
dtype=ANTENNA_CHANGE,
action=table[0] if table else 'A',
fit=True if '+' in table or (self.add_metadata_jumps and '-' not in table)
else False))
# frame changes if ppp
if self.solution_type == 'ppp':
frames = cnn.query(
'SELECT distinct on ("ReferenceFrame") "ReferenceFrame", "Year", "DOY" from ppp_soln WHERE '
'"NetworkCode" = \'%s\' AND "StationCode" = \'%s\' order by "ReferenceFrame", "Year", "DOY"' %
(NetworkCode, StationCode))
frames = frames.dictresult()
if len(frames) > 1:
# more than one frame, add a jump
frames.sort(key=lambda k: k['Year'])
for frame in frames[1:]:
date = pyDate.Date(Year=frame['Year'], doy=frame['DOY'])
table = [j['action'] for j in jp if j['Year'] == date.year and j['DOY'] == date.doy]
self.table.append(Jump(NetworkCode, StationCode, soln, t, date,
'Frame Change: %s' % frame['ReferenceFrame'],
dtype=REFERENCE_FRAME_JUMP,
action=table[0] if table else 'A',
fit=True if '-' not in table else False))
# now check the jump table to add specific jumps
jp = cnn.query('SELECT * FROM etm_params WHERE "NetworkCode" = \'%s\' AND "StationCode" = \'%s\' '
'AND soln = \'%s\' AND jump_type = 0 AND object = \'jump\' '
'AND action = \'+\'' % (NetworkCode, StationCode, self.solution_type))
jp = jp.dictresult()
table = [j.date for j in self.table]
for j in jp:
date = pyDate.Date(Year=j['Year'], doy=j['DOY'])
if date not in table:
self.table.append(Jump(NetworkCode, StationCode, soln, t, date, 'mechanic-jump',
dtype=GENERIC_JUMP, action='+'))
class Periodic(EtmFunction):
""""class to determine the periodic terms to be included in the ETM"""
def __init__(self, cnn, NetworkCode, StationCode, soln, t, FitPeriodic=True):
super(Periodic, self).__init__(NetworkCode=NetworkCode, StationCode=StationCode, soln=soln)
try:
# load the frequencies from the database
etm_param = cnn.get('etm_params',
{'NetworkCode': NetworkCode, 'StationCode': StationCode, 'soln': soln.type,
'object': 'periodic'},
['NetworkCode', 'StationCode', 'soln', 'object'])
self.p.frequencies = np.array([float(p) for p in etm_param['frequencies']])
except pg.DatabaseError:
self.p.frequencies = DEFAULT_FREQUENCIES
self.p.object = 'periodic'
if t.size > 1 and FitPeriodic:
# wrap around the solutions
wt = np.sort(np.unique(t - np.fix(t)))
# analyze the gaps in the data
dt = np.diff(wt)
# max dt (internal)
dtmax = np.max(dt)
# dt wrapped around
dt_interyr = 1 - wt[-1] + wt[0]
if dt_interyr > dtmax:
dtmax = dt_interyr
# save the value of the max wrapped delta time
self.dt_max = dtmax
# get the 50 % of Nyquist for each component (and convert to average fyear)
self.nyquist = ((1 / self.p.frequencies) / 2.) * 0.5 * 1 / 365.25
# frequency count
self.frequency_count = int(np.sum(self.dt_max <= self.nyquist))
# redefine the frequencies vector to accommodate only the frequencies that can be fit
self.p.frequencies = self.p.frequencies[self.dt_max <= self.nyquist]
else:
# no periodic terms
self.frequency_count = 0
self.p.frequencies = np.array([])
self.dt_max = 1 # one year of delta t
logger.info('Periodic -> Frequency count: %i; FitPeriodic: %s' % (self.frequency_count, str(FitPeriodic)))
# build the metadata description for the json string
self.p.metadata = '['
for k in ['n', 'e', 'u']:
self.p.metadata = self.p.metadata + '['
meta = []
for i in ['sin', 'cos']:
for f in (1 / (self.p.frequencies * 365.25)).tolist():
meta.append('%s:%s(%.1f yr)' % (k, i, f))
self.p.metadata = self.p.metadata + ','.join(meta) + '],'
self.p.metadata = self.p.metadata + ']'
self.design = self.get_design_ts(t)
self.param_count = self.frequency_count * 2
# declare the location of the answer (to be filled by Design object)
self.column_index = np.array([])
self.format_str = language[LANG]['periodic'] + ' (' + \
', '.join(['%.1f yr' % i for i in (1 / (self.p.frequencies * 365.25)).tolist()]) + \
') N: %s E: %s U: %s [mm]'
self.p.hash = crc32(str(self.p.frequencies) + VERSION)
def get_design_ts(self, ts):
# if dtmax < 3 months (90 days = 0.2465 yr), then we can fit the annual
# if dtmax < 1.5 months (45 days = 0.1232 yr), then we can fit the semi-annual too
if self.frequency_count > 0:
f = self.p.frequencies
f = np.tile(f, (ts.shape[0], 1))
As = np.array(sin(2 * pi * f * 365.25 * np.tile(ts[:, np.newaxis], (1, f.shape[1]))))
Ac = np.array(cos(2 * pi * f * 365.25 * np.tile(ts[:, np.newaxis], (1, f.shape[1]))))
A = np.column_stack((As, Ac))
else:
# no periodic terms
A = np.array([])
return A
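# Minimal sketch (not part of the module) of one sine/cosine pair produced above; the stored
# frequencies appear to be in cycles per day, hence the 365.25 factor with t in fractional years:
#   import numpy as np
#   t = np.arange(2010.0, 2012.0, 1 / 365.25)   # fractional years
#   f = 1.0 / 365.25                            # annual term
#   As = np.sin(2 * np.pi * f * 365.25 * t)     # one sine column of the design matrix
#   Ac = np.cos(2 * np.pi * f * 365.25 * t)     # and its cosine companion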
def print_parameters(self):
n = np.array([])
e = np.array([])
u = np.array([])
for p in np.arange(self.param_count):
psc = self.p.params[:, p]
sn = psc[0]
se = psc[1]
su = psc[2]
n = np.append(n, sn)
e = np.append(e, se)
u = np.append(u, su)
n = n.reshape((2, self.param_count / 2))
e = e.reshape((2, self.param_count / 2))
u = u.reshape((2, self.param_count / 2))
# calculate the amplitude of the components
an = np.sqrt(np.square(n[0, :]) + np.square(n[1, :]))
ae = np.sqrt(np.square(e[0, :]) + np.square(e[1, :]))
au = np.sqrt(np.square(u[0, :]) + np.square(u[1, :]))
return self.format_str % (np.array_str(an * 1000.0, precision=1),
np.array_str(ae * 1000.0, precision=1),
np.array_str(au * 1000.0, precision=1))
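# Illustrative note (not part of the module): the printed periodic amplitudes combine the sine and
# cosine estimates of each frequency. For one component, with hypothetical coefficients in meters:
#   import numpy as np
#   a_sin, a_cos = 0.003, 0.004
#   np.sqrt(a_sin ** 2 + a_cos ** 2) * 1000.0   # 5.0 mm, as reported in the plot header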
class Polynomial(EtmFunction):
""""class to build the linear portion of the design matrix"""
def __init__(self, cnn, NetworkCode, StationCode, soln, t, t_ref=0, interseismic=None):
super(Polynomial, self).__init__(NetworkCode=NetworkCode, StationCode=StationCode, soln=soln)
# t ref (just the beginning of t vector)
if t_ref == 0:
t_ref = np.min(t)
self.p.object = 'polynomial'
self.p.t_ref = t_ref
self.interseismic = np.zeros((3, t.shape[0]))
if interseismic:
logger.info('Polynomial -> Interseismic velocity provided: removing velocity from fit')
# interseismic model provided, do not fit linear (remove trend)
tt = (t - t_ref)
if type(interseismic) is list:
interseismic = np.array(interseismic)
# convert to np if list is given
for i in range(3):
self.interseismic[i] = tt * interseismic[i]
self.terms = 1
self.format_str = language[LANG]['position'] + ' (' + '%.3f' % t_ref + \
') X: {:.3f} Y: {:.3f} Z: {:.3f} [m]\n' \
+ language[LANG]['velocity'] + ' (' \
+ language[LANG]['from_model'] + ')' + \
' N: {:.2f} E: {:.2f} U: {:.2f} [mm/yr]'.format(*(interseismic * 1000))
self.p.metadata = '[[n:pos, n:vel],[e:pos, e:vel],[u:pos, u:vel]]'
else:
try:
# load the number of terms from the database
etm_param = cnn.get('etm_params',
{'NetworkCode': NetworkCode, 'StationCode': StationCode, 'soln': soln.type,
'object': 'polynomial'},
['NetworkCode', 'StationCode', 'soln', 'object'])
self.terms = int(etm_param['terms'])
except pg.DatabaseError:
self.terms = DEFAULT_POL_TERMS
logger.info('Polynomial -> Fitting %i term(s)' % self.terms)
if self.terms == 1:
self.format_str = language[LANG]['position'] + ' (' + '%.3f' % t_ref + \
') X: {:.3f} Y: {:.3f} Z: {:.3f} [m]'
self.p.metadata = '[[n:pos],[e:pos],[u:pos]]'
elif self.terms == 2:
self.format_str = language[LANG]['position'] + ' (' + '%.3f' % t_ref + \
') X: {:.3f} Y: {:.3f} Z: {:.3f} [m]\n' \
+ language[LANG]['velocity'] + ' N: {:.2f} E: {:.2f} U: {:.2f} [mm/yr]'
self.p.metadata = '[[n:pos, n:vel],[e:pos, e:vel],[u:pos, u:vel]]'
elif self.terms == 3:
self.format_str = language[LANG]['position'] + ' (' + '%.3f' % t_ref + \
') X: {:.3f} Y: {:.3f} Z: {:.3f} [m]\n' \
+ language[LANG]['velocity'] + ' N: {:.3f} E: {:.3f} U: {:.3f} [mm/yr]\n' \
+ language[LANG]['acceleration'] + ' N: {:.2f} E: {:.2f} U: {:.2f} [mm/yr**2]'
self.p.metadata = '[[n:pos, n:vel, n:acc],[e:pos, e:vel, e:acc],[u:pos, u:vel, u:acc]]'
elif self.terms > 3:
self.format_str = language[LANG]['position'] + ' (' + '%.3f' % t_ref + \
') X: {:.3f} Y: {:.3f} Z: {:.3f} [m]\n' \
+ language[LANG]['velocity'] + ' N: {:.3f} E: {:.3f} U: {:.3f} [mm/yr]\n' \
+ language[LANG]['acceleration'] + ' N: {:.2f} E: {:.2f} U: {:.2f} [mm/yr**2] + ' \
+ '%i ' % (self.terms - 3) + language[LANG]['other']
self.p.metadata = '[[n:pos, n:vel, n:acc, n:tx...],' \
'[e:pos, e:vel, e:acc, e:tx...],' \
'[u:pos, u:vel, u:acc, u:tx...]]'
self.design = self.get_design_ts(t)
# always first in the list of A, index columns are fixed
self.column_index = np.arange(self.terms)
# param count is the same as terms
self.param_count = self.terms
# save the hash of the object
self.p.hash = crc32(str(self.terms) + VERSION)
def load_parameters(self, params, sigmas, t_ref):
super(Polynomial, self).load_parameters(params=params, sigmas=sigmas)
self.p.t_ref = t_ref
def print_parameters(self, ref_xyz, lat, lon):
params = np.zeros((3,))
for p in np.arange(self.terms):
if p == 0:
params[0], params[1], params[2] = lg2ct(self.p.params[0, 0],
self.p.params[1, 0],
self.p.params[2, 0], lat, lon)
params += ref_xyz.flatten()
elif p > 0:
n = self.p.params[0, p]
e = self.p.params[1, p]
u = self.p.params[2, p]
params = np.append(params, (n*1000, e*1000, u*1000))
return self.format_str.format(*params.tolist())
def get_design_ts(self, ts):
A = np.zeros((ts.size, self.terms))
for p in np.arange(self.terms):
A[:, p] = np.power(ts - self.p.t_ref, p)
return A
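# Minimal sketch (not part of the module) of the polynomial design built above: one column per
# power of (t - t_ref). Names below are hypothetical:
#   import numpy as np
#   t = np.arange(2010.0, 2015.0, 1 / 365.25)
#   t_ref = t.min()
#   A = np.column_stack([np.power(t - t_ref, p) for p in range(2)])   # [1, t - t_ref]: position + velocity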
class Design(np.ndarray):
def __new__(subtype, Linear, Jumps, Periodic, dtype=float, buffer=None, offset=0, strides=None, order=None):
# Create the ndarray instance of our type, given the usual
# ndarray input arguments. This will call the standard
# ndarray constructor, but return an object of our type.
# It also triggers a call to InfoArray.__array_finalize__
shape = (Linear.design.shape[0], Linear.param_count + Jumps.param_count() + Periodic.param_count)
A = super(Design, subtype).__new__(subtype, shape, dtype, buffer, offset, strides, order)
A[:, Linear.column_index] = Linear.design
# determine the column_index for all objects
col_index = Linear.param_count
for jump in Jumps.table:
# save the column index
if jump.fit:
jump.column_index = np.arange(col_index, col_index + jump.param_count)
# assign the portion of the design matrix
A[:, jump.column_index] = jump.design
# increment the col_index
col_index += jump.param_count
Periodic.column_index = np.arange(col_index, col_index + Periodic.param_count)
A[:, Periodic.column_index] = Periodic.design
# save the object list
A.objects = (Linear, Jumps, Periodic)
# save the number of total parameters
A.linear_params = Linear.param_count
A.jump_params = Jumps.param_count()
A.periodic_params = Periodic.param_count
A.params = Linear.param_count + Jumps.param_count() + Periodic.param_count
# save the constrains matrix
A.constrains = Jumps.constrains
# Finally, we must return the newly created object:
return A
def __call__(self, ts=None, constrains=False):
if ts is None:
if constrains:
if self.constrains.size:
A = self.copy()
# resize matrix (use A.resize so that it fills with zeros)
A.resize((self.shape[0] + self.constrains.shape[0], self.shape[1]), refcheck=False)
# apply constrains
A[-self.constrains.shape[0]:, self.jump_params] = self.constrains
return A
else:
return self
else:
return self
else:
A = np.array([])
for obj in self.objects:
tA = obj.get_design_ts(ts)
if A.size:
A = np.column_stack((A, tA)) if tA.size else A
else:
A = tA
return A
def get_l(self, L, constrains=False):
if constrains:
if self.constrains.size:
tL = L.copy()
tL.resize((L.shape[0] + self.constrains.shape[0]), refcheck=False)
return tL
else:
return L
else:
return L
def get_p(self, constrains=False):
# return a weight vector of ones, with or without the extra elements for the constrains
return np.ones((self.shape[0])) if not constrains else \
np.ones((self.shape[0] + self.constrains.shape[0]))
def remove_constrains(self, v):
# remove the constrains to whatever vector is passed
if self.constrains.size:
return v[0:-self.constrains.shape[0]]
else:
return v
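# Illustrative note (not part of the module): the Design array lays out its columns as
#   [ polynomial (terms) | fitted jumps, in table order | periodic (2 per frequency) ]
# and writes each object's column_index so the estimated parameter vector can be unpacked later.
# A hypothetical inspection, assuming `etm` is an already-built ETM instance:
#   A = etm.A                                              # Design object
#   A.linear_params, A.jump_params, A.periodic_params      # per-block column counts
#   etm.Linear.column_index                                # always the leading columns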
class ETM:
def __init__(self, cnn, soln, no_model=False, FitEarthquakes=True, FitGenericJumps=True, FitPeriodic=True,
interseismic=None):
# to display more verbose warnings
# warnings.showwarning = self.warn_with_traceback
self.C = np.array([])
self.S = np.array([])
self.F = np.array([])
self.R = np.array([])
self.P = np.array([])
self.factor = np.array([])
self.covar = np.zeros((3, 3))
self.A = None
self.param_origin = ESTIMATION
self.soln = soln
self.no_model = no_model
self.FitEarthquakes = FitEarthquakes
self.FitGenericJumps = FitGenericJumps
self.FitPeriodic = FitPeriodic
self.NetworkCode = soln.NetworkCode
self.StationCode = soln.StationCode
logger.info('Creating ETM object for %s.%s' % (self.NetworkCode, self.StationCode))
# save the function objects
self.Linear = Polynomial(cnn, soln.NetworkCode, soln.StationCode, self.soln, self.soln.t,
interseismic=interseismic)
self.Periodic = Periodic(cnn, soln.NetworkCode, soln.StationCode, self.soln, self.soln.t, FitPeriodic)
self.Jumps = JumpTable(cnn, soln.NetworkCode, soln.StationCode, self.soln, self.soln.t,
FitEarthquakes, FitGenericJumps)
# calculate the hash value for this station
# now hash also includes the timestamp of the last time pyETM was modified.
self.hash = soln.hash
# anything with four solutions or fewer is not worth it
if soln.solutions > 4 and not no_model:
# to obtain the parameters
self.A = Design(self.Linear, self.Jumps, self.Periodic)
# check if problem can be solved!
if self.A.shape[1] >= soln.solutions:
self.A = None
return
self.As = self.A(soln.ts)
else:
logger.info('Not enough solutions (or no_model requested), cannot calculate ETM')
def run_adjustment(self, cnn, l, plotit=False, soln=None):
if self.A is not None:
# try to load the last ETM solution from the database
etm_objects = cnn.query_float('SELECT * FROM etms WHERE "NetworkCode" = \'%s\' '
'AND "StationCode" = \'%s\' AND soln = \'%s\' AND stack = \'%s\''
% (self.NetworkCode, self.StationCode, self.soln.type,
self.soln.stack_name), as_dict=True)
# DDG: Attention: it is not always possible to retrieve the parameters from the database using the hash
# strategy. The jump table is determined and their hash values calculated. The fit attribute goes into the
# hash value. When an unrealistic jump is detected, the jump is removed from the fit and the final
# parameters are saved without this jump. Thus, when loading the object, the jump will be added to fit but
# it will not be present in the database.
db_hash_sum = sum([obj['hash'] for obj in etm_objects])
jumps_hash = sum([o.p.hash for o in self.Jumps.table if o.fit])
ob_hash_sum = self.Periodic.p.hash + self.Linear.p.hash + self.hash + jumps_hash
cn_object_sum = len([o.p.hash for o in self.Jumps.table if o.fit]) + 2
# -1 to account for the var_factor entry
if len(etm_objects) - 1 == cn_object_sum and db_hash_sum == ob_hash_sum:
logger.info('ETM -> Loading parameters from database (db hash %i; ob hash %i)'
% (db_hash_sum, ob_hash_sum))
# load the parameters from the db
self.load_parameters(etm_objects, l)
# signal the outside world that the parameters were loaded from the database (no need to save them)
self.param_origin = DATABASE
else:
logger.info('ETM -> Estimating parameters (db hash %i; ob hash %i)'
% (db_hash_sum, ob_hash_sum))
# signal the outside world that the parameters were estimated (and need to be saved)
self.param_origin = ESTIMATION
# purge table and recompute
cnn.query('DELETE FROM etms WHERE "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\' AND soln = \'%s\' AND stack = \'%s\''
% (self.NetworkCode, self.StationCode, self.soln.type, self.soln.stack_name))
if self.soln.type == 'dra':
# if the solution is of type 'dra', delete the excluded solutions
cnn.query('DELETE FROM gamit_soln_excl WHERE "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\'' % (self.NetworkCode, self.StationCode))
# use the default parameters from the objects
t_ref = self.Linear.p.t_ref
j = 0
do_again = False
while j < 10:
c = []
f = []
s = []
r = []
p = []
factor = []
for i in range(3):
x, sigma, index, residuals, fact, w = self.adjust_lsq(self.A, l[i])
c.append(x)
s.append(sigma)
f.append(index)
r.append(residuals)
factor.append(fact)
p.append(w)
self.C = np.array(c)
self.S = np.array(s)
self.F = np.array(f)
self.R = np.array(r)
self.factor = np.array(factor)
self.P = np.array(p)
# load_parameters to the objects
self.Linear.load_parameters(self.C, self.S, t_ref)
self.Jumps.load_parameters(self.C, self.S)
self.Periodic.load_parameters(params=self.C, sigmas=self.S)
# determine if any jumps are unrealistic
for jump in self.Jumps.table:
if jump.fit and jump.p.jump_type in (CO_SEISMIC_JUMP_DECAY, CO_SEISMIC_DECAY) \
and np.any(np.abs(jump.p.params[:, -jump.nr:]) > 0.5):
# unrealistic, remove
jump.remove_from_fit()
do_again = True
logger.info('ETM -> Unrealistic jump detected (%s : %s), removing and redoing fit'
% (np.array_str(jump.p.params[:, -jump.nr:].flatten(), precision=1),
type_dict[jump.p.jump_type]))
if not do_again:
break
else:
self.A = Design(self.Linear, self.Jumps, self.Periodic)
if soln:
self.As = self.A(soln.ts)
j += 1
# load the covariances using the correlations
self.process_covariance()
if plotit:
self.plot()
else:
logger.info('ETM -> Empty design matrix')
def process_covariance(self):
cov = np.zeros((3, 1))
# save the covariance between N-E, E-U, N-U
f = self.F[0] * self.F[1] * self.F[2]
# load the covariances using the correlations
cov[0] = np.corrcoef(self.R[0][f], self.R[1][f])[0, 1] * self.factor[0] * self.factor[1]
cov[1] = np.corrcoef(self.R[1][f], self.R[2][f])[0, 1] * self.factor[1] * self.factor[2]
cov[2] = np.corrcoef(self.R[0][f], self.R[2][f])[0, 1] * self.factor[0] * self.factor[2]
# build a variance-covariance matrix
self.covar = np.diag(np.square(self.factor))
self.covar[0, 1] = cov[0]
self.covar[1, 0] = cov[0]
self.covar[2, 1] = cov[1]
self.covar[1, 2] = cov[1]
self.covar[0, 2] = cov[2]
self.covar[2, 0] = cov[2]
if not self.isPD(self.covar):
self.covar = self.nearestPD(self.covar)
def save_excluded_soln(self, cnn):
for date, f, r in zip(self.soln.date, np.logical_and(np.logical_and(self.F[0], self.F[1]), self.F[2]),
np.sqrt(np.sum(np.square(self.R), axis=0))):
if not cnn.query_float('SELECT * FROM gamit_soln_excl WHERE "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\' AND "Project" = \'%s\' AND "Year" = %i AND "DOY" = %i'
% (self.NetworkCode, self.StationCode, self.soln.stack_name, date.year, date.doy)) \
and not f:
cnn.query('INSERT INTO gamit_soln_excl ("NetworkCode", "StationCode", "Project", "Year", "DOY", '
'residual) VALUES (\'%s\', \'%s\', \'%s\', %i ,%i, %.4f)'
% (self.NetworkCode, self.StationCode, self.soln.stack_name, date.year, date.doy, r))
def save_parameters(self, cnn):
# only save the parameters when they've been estimated, not when loaded from database
if self.param_origin == ESTIMATION:
# insert linear parameters
cnn.insert('etms', row=to_postgres(self.Linear.p.toDict()))
# insert jumps
for jump in self.Jumps.table:
if jump.fit:
cnn.insert('etms', row=to_postgres(jump.p.toDict()))
# insert periodic params
cnn.insert('etms', row=to_postgres(self.Periodic.p.toDict()))
# save the variance factors
cnn.query('INSERT INTO etms ("NetworkCode", "StationCode", soln, object, params, hash, stack) VALUES '
'(\'%s\', \'%s\', \'%s\', \'var_factor\', \'%s\', %i, \'%s\')'
% (self.NetworkCode, self.StationCode, self.soln.type, to_postgres(self.factor),
self.hash, self.soln.stack_name))
def plot(self, pngfile=None, t_win=None, residuals=False, plot_missing=True,
ecef=False, plot_outliers=True, fileio=None):
import matplotlib.pyplot as plt
L = self.l * 1000
# definitions
m = []
if ecef:
labels = ('X [mm]', 'Y [mm]', 'Z [mm]')
else:
labels = (language[LANG]['north'] + ' [mm]',
language[LANG]['east'] + ' [mm]',
language[LANG]['up'] + ' [mm]')
# get filtered observations
if self.A is not None:
filt = self.F[0] * self.F[1] * self.F[2]
for i in range(3):
m.append((np.dot(self.As, self.C[i])) * 1000)
else:
filt = np.ones(self.soln.x.shape[0], dtype=bool)
# rotate to NEU
if ecef:
lneu = self.rotate_2xyz(L)
else:
lneu = L
# determine the window of the plot, if requested
if t_win is not None:
if type(t_win) is tuple:
# data range, with possibly a final value
if len(t_win) == 1:
t_win = (t_win[0], self.soln.t.max())
else:
# approximate a day in fyear
t_win = (self.soln.t.max() - t_win/365.25, self.soln.t.max())
# new behaviour: plots the time series even if there is no ETM fit
if self.A is not None:
# create the axis
if plot_outliers:
f, axis = plt.subplots(nrows=3, ncols=2, sharex=True, figsize=(15, 10)) # type: plt.subplots
axis_vect = (axis[0][0], axis[1][0], axis[2][0])
else:
f, axis = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(15, 10)) # type: plt.subplots
axis_vect = (axis[0], axis[1], axis[2])
# rotate modeled ts
if not ecef:
mneu = m
rneu = self.R
fneu = self.factor * 1000
else:
mneu = self.rotate_2xyz(m)
# rotate residuals
rneu = self.rotate_2xyz(self.R)
fneu = np.sqrt(np.diag(self.rotate_sig_cov(covar=self.covar))) * 1000
# ################# FILTERED PLOT #################
f.suptitle(language[LANG]['station'] + ' %s.%s (%s %.2f%%) lat: %.5f lon: %.5f\n'
'%s\n%s\n'
'NEU wrms [mm]: %5.2f %5.2f %5.2f' %
(self.NetworkCode, self.StationCode, self.soln.stack_name.upper(), self.soln.completion,
self.soln.lat, self.soln.lon,
self.Linear.print_parameters(np.array([self.soln.auto_x, self.soln.auto_y, self.soln.auto_z]),
self.soln.lat, self.soln.lon),
self.Periodic.print_parameters(),
fneu[0], fneu[1], fneu[2]), fontsize=9, family='monospace')
table_n, table_e, table_u = self.Jumps.print_parameters()
tables = (table_n, table_e, table_u)
for i, ax in enumerate(axis_vect):
# plot filtered time series
if not residuals:
ax.plot(self.soln.t[filt], lneu[i][filt], 'ob', markersize=2)
ax.plot(self.soln.ts, mneu[i], 'r')
# error bars
ax.plot(self.soln.ts, mneu[i] - fneu[i] * LIMIT, 'b', alpha=0.1)
ax.plot(self.soln.ts, mneu[i] + fneu[i] * LIMIT, 'b', alpha=0.1)
ax.fill_between(self.soln.ts, mneu[i] - fneu[i] * LIMIT, mneu[i] + fneu[i] * LIMIT,
antialiased=True, alpha=0.2)
else:
ax.plot(self.soln.t[filt], rneu[i][filt]*1000, 'ob', markersize=2)
# error bars
ax.plot(self.soln.ts, - np.repeat(fneu[i], self.soln.ts.shape[0]) * LIMIT, 'b', alpha=0.1)
ax.plot(self.soln.ts, np.repeat(fneu[i], self.soln.ts.shape[0]) * LIMIT, 'b', alpha=0.1)
ax.fill_between(self.soln.ts, -fneu[i] * LIMIT, fneu[i] * LIMIT, antialiased=True, alpha=0.2)
ax.grid(True)
# labels
ax.set_ylabel(labels[i])
p = ax.get_position()
f.text(0.005, p.y0, tables[i], fontsize=8, family='monospace')
# window data
self.set_lims(t_win, plt, ax)
# plot jumps
self.plot_jumps(ax)
# ################# OUTLIERS PLOT #################
if plot_outliers:
for i, ax in enumerate((axis[0][1], axis[1][1], axis[2][1])):
ax.plot(self.soln.t, lneu[i], 'oc', markersize=2)
ax.plot(self.soln.t[filt], lneu[i][filt], 'ob', markersize=2)
ax.plot(self.soln.ts, mneu[i], 'r')
# error bars
ax.plot(self.soln.ts, mneu[i] - fneu[i] * LIMIT, 'b', alpha=0.1)
ax.plot(self.soln.ts, mneu[i] + fneu[i] * LIMIT, 'b', alpha=0.1)
ax.fill_between(self.soln.ts, mneu[i] - fneu[i]*LIMIT, mneu[i] + fneu[i]*LIMIT,
antialiased=True, alpha=0.2)
self.set_lims(t_win, plt, ax)
ax.set_ylabel(labels[i])
ax.grid(True)
if plot_missing:
self.plot_missing_soln(ax)
f.subplots_adjust(left=0.18)
else:
f, axis = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(15, 10)) # type: plt.subplots
f.suptitle(language[LANG]['station'] + ' %s.%s (%s %.2f%%) lat: %.5f lon: %.5f'
% (self.NetworkCode, self.StationCode, self.soln.type.upper(), self.soln.completion,
self.soln.lat, self.soln.lon) +
'\n' + language[LANG]['not_enough'], fontsize=9, family='monospace')
for i, ax in enumerate((axis[0], axis[1], axis[2])):
ax.plot(self.soln.t, lneu[i], 'ob', markersize=2)
ax.set_ylabel(labels[i])
ax.grid(True)
self.set_lims(t_win, plt, ax)
self.plot_jumps(ax)
if plot_missing:
self.plot_missing_soln(ax)
if pngfile is not None:
plt.savefig(pngfile)
plt.close()
elif fileio is not None:
plt.savefig(fileio, format='png')
# plt.show()
fileio.seek(0) # rewind to beginning of file
plt.close()
return base64.b64encode(fileio.getvalue())
else:
self.f = f
self.picking = False
self.plt = plt
axprev = plt.axes([0.85, 0.01, 0.08, 0.055])
bcut = Button(axprev, 'Add jump', color='red', hovercolor='green')
bcut.on_clicked(self.enable_picking)
plt.show()
plt.close()
def onpick(self, event):
import dbConnection
self.f.canvas.mpl_disconnect(self.cid)
self.picking = False
print 'Epoch: %s' % pyDate.Date(fyear=event.xdata).yyyyddd()
jtype = int(input(' -- Enter type of jump (0 = mechanic; 1 = geophysical): '))
if jtype == 1:
relx = input(' -- Enter relaxation (e.g. 0.5, 0.5,0.01): ')
operation = str(raw_input(' -- Enter operation (+, -): '))
print ' >> Jump inserted'
# now insert the jump into the db
cnn = dbConnection.Cnn('gnss_data.cfg')
self.plt.close()
# reinitialize ETM
# wait for 'keep' or 'undo' command
def enable_picking(self, event):
if not self.picking:
print 'Entering picking mode'
self.picking = True
self.cid = self.f.canvas.mpl_connect('button_press_event', self.onpick)
else:
print 'Disabling picking mode'
self.picking = False
self.f.canvas.mpl_disconnect(self.cid)
def plot_hist(self, pngfile=None):
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from scipy.stats import norm
from matplotlib.patches import Ellipse
labels = (language[LANG]['north'] + ' [mm]',
language[LANG]['east'] + ' [mm]',
language[LANG]['up'] + ' [mm]')
if self.A is not None:
filt = self.F[0] * self.F[1] * self.F[2]
f, axis = plt.subplots(nrows=2, ncols=2, figsize=(15, 10)) # type: plt.subplots
f.suptitle(language[LANG]['station'] + ' %s.%s (%s %.2f%%) lat: %.5f lon: %.5f\n'
'VAR (N E U) : %s\n'
'COV (N-E N-U E-U): %s'
% (self.NetworkCode, self.StationCode, self.soln.type.upper(), self.soln.completion,
self.soln.lat, self.soln.lon, ' '.join(['%10.3e' % i for i in np.diag(self.covar)]),
' '.join(['%10.3e' % i for i in [self.covar[0, 1], self.covar[0, 2], self.covar[1, 2]]])),
fontsize=9, family='monospace')
n = np.sqrt(np.sum(self.R ** 2, axis=0))
N = self.R[0][n <= 0.05] * 1000
E = self.R[1][n <= 0.05] * 1000
U = self.R[2][n <= 0.05] * 1000
# N-E residuals and error ellipse
ax = axis[0][0]
ax.plot(E, N, 'ob', markersize=2)
# ax.plot(E[filt], N[filt], 'ob', markersize=2)
# ax.plot(E[np.logical_not(filt)], N[np.logical_not(filt)], 'oc', markersize=2)
# process the covariance matrix
c = self.covar[0:2, 0:2]
c[1, 1], c[0, 0] = c[0, 0], c[1, 1]
w, v = np.linalg.eigh(self.covar[0:2, 0:2])
order = w.argsort()[::-1]
w, v = w[order], v[:, order]
theta = np.degrees(np.arctan2(*v[:, 0][::-1]))
ellipse = Ellipse((np.mean(self.R[1][filt]), np.mean(self.R[0][filt])),  # center at (mean E, mean N)
width=2. * np.sqrt(w[0]) * 2.5 * 1000,
height=2. * np.sqrt(w[1]) * 2.5 * 1000,
angle=theta,
facecolor='none',
edgecolor='red',
zorder=3,
label=r'$2.5\sigma$')
ax.add_patch(ellipse)
ax.grid(True)
ax.set_ylabel(labels[0])
ax.set_xlabel(labels[1])
ax.set_title(language[LANG]['residual plot'] + ' ' + language[LANG]['north'] + '-' + language[LANG]['east'])
ax.axis('equal')
f.canvas.draw()
ax.legend()
nn = ax.get_ylim()
ee = ax.get_xlim()
# N histogram
ax = axis[0][1]
# (mu, sigma) = norm.fit(N)
n, bins, patches = ax.hist(N, 200, alpha=0.75, facecolor='blue', orientation='horizontal')
# y = mlab.normpdf(bins, mu, sigma)
# ax.plot(y, bins, 'r--', linewidth=2)
ax.grid(True)
ax.set_xlabel(language[LANG]['frequency'])
ax.set_ylabel(language[LANG]['N residuals'] + ' [mm]')
ax.set_title(language[LANG]['histogram plot'] + ' ' + language[LANG]['north'])
ax.set_ylim(nn)
# E histogram
ax = axis[1][0]
# (mu, sigma) = norm.fit(E)
n, bins, patches = ax.hist(E, 200, alpha=0.75, facecolor='blue')
# y = mlab.normpdf(bins, mu, sigma)
# ax.plot(bins, y, 'r--', linewidth=2)
ax.grid(True)
ax.set_ylabel(language[LANG]['frequency'])
ax.set_xlabel(language[LANG]['E residuals'] + ' [mm]')
ax.set_title(language[LANG]['histogram plot'] + ' ' + language[LANG]['east'])
ax.set_xlim(ee)
# Up histogram
ax = axis[1][1]
# (mu, sigma) = norm.fit(U)
n, bins, patches = ax.hist(U, 200, alpha=0.75, facecolor='blue')
# y = mlab.normpdf(bins, mu, sigma)
# ax.plot(bins, y, 'r--', linewidth=2)
ax.grid(True)
ax.set_ylabel(language[LANG]['frequency'])
ax.set_xlabel(language[LANG]['U residuals'] + ' [mm]')
ax.set_title(language[LANG]['histogram plot'] + ' ' + language[LANG]['up'])
#residuals = np.sqrt(np.square(L[0]) + np.square(L[1]) + np.square(L[2])) - \
# np.sqrt(np.square(np.dot(self.A, self.C[0])) + np.square(np.dot(self.A, self.C[1])) +
# np.square(np.dot(self.A, self.C[2])))
#(mu, sigma) = norm.fit(residuals)
#n, bins, patches = plt.hist(residuals, 200, normed=1, alpha=0.75, facecolor='blue')
#y = mlab.normpdf(bins, mu, sigma)
#plt.plot(bins, y, 'r--', linewidth=2)
#plt.title(r'$\mathrm{Histogram\ of\ residuals (mm):}\ \mu=%.3f,\ \sigma=%.3f$' % (mu*1000, sigma*1000))
#plt.grid(True)
if not pngfile:
plt.show()
plt.close()
else:
plt.savefig(pngfile)
plt.close()
@staticmethod
def autoscale_y(ax, margin=0.1):
"""This function rescales the y-axis based on the data that is visible given the current xlim of the axis.
ax -- a matplotlib axes object
margin -- the fraction of the total height of the y-data to pad the upper and lower ylims"""
def get_bottom_top(line):
xd = line.get_xdata()
yd = line.get_ydata()
lo, hi = ax.get_xlim()
y_displayed = yd[((xd > lo) & (xd < hi))]
h = np.max(y_displayed) - np.min(y_displayed)
bot = np.min(y_displayed) - margin * h
top = np.max(y_displayed) + margin * h
return bot, top
lines = ax.get_lines()
bot, top = np.inf, -np.inf
for line in lines:
new_bot, new_top = get_bottom_top(line)
if new_bot < bot:
bot = new_bot
if new_top > top:
top = new_top
if bot == top:
ax.autoscale(enable=True, axis='y', tight=False)
ax.autoscale(enable=False, axis='y', tight=False)
else:
ax.set_ylim(bot, top)
def set_lims(self, t_win, plt, ax):
if t_win is None:
# turn on to adjust the limits, then turn off to plot jumps
ax.autoscale(enable=True, axis='x', tight=False)
ax.autoscale(enable=False, axis='x', tight=False)
ax.autoscale(enable=True, axis='y', tight=False)
ax.autoscale(enable=False, axis='y', tight=False)
else:
if t_win[0] == t_win[1]:
t_win[0] = t_win[0] - 1./365.25
t_win[1] = t_win[1] + 1./365.25
plt.xlim(t_win)
self.autoscale_y(ax)
def plot_missing_soln(self, ax):
# plot missing solutions
for missing in self.soln.ts_ns:
ax.plot((missing, missing), ax.get_ylim(), color=(1, 0, 1, 0.2), linewidth=1)
# plot the position of the outliers
for blunder in self.soln.ts_blu:
ax.quiver((blunder, blunder), ax.get_ylim(), (0, 0), (-0.01, 0.01), scale_units='height',
units='height', pivot='tip', width=0.008, edgecolors='r')
def plot_jumps(self, ax):
for jump in self.Jumps.table:
if jump.date < self.soln.date[0] or jump.date > self.soln.date[-1]:
continue
if not jump.fit:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), ':', color='tab:gray')
elif jump.p.jump_type == GENERIC_JUMP:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), 'c:')
elif jump.p.jump_type == ANTENNA_CHANGE:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), 'b:')
elif jump.p.jump_type == REFERENCE_FRAME_JUMP:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), ':', color='tab:green')
elif jump.p.jump_type == CO_SEISMIC_JUMP_DECAY:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), 'r:')
elif jump.p.jump_type == CO_SEISMIC_JUMP:
ax.plot((jump.date.fyear, jump.date.fyear), ax.get_ylim(), ':', color='tab:purple')
def todictionary(self, time_series=False, model=False):
# convert the ETM adjustment into a dictionary
# optionally, output the whole time series and evaluated model as well
L = self.l
# start with the parameters
etm = dict()
etm['Network'] = self.NetworkCode
etm['Station'] = self.StationCode
etm['lat'] = self.soln.lat[0]
etm['lon'] = self.soln.lon[0]
etm['ref_x'] = self.soln.auto_x[0]
etm['ref_y'] = self.soln.auto_y[0]
etm['ref_z'] = self.soln.auto_z[0]
etm['Jumps'] = [to_list(jump.p.toDict()) for jump in self.Jumps.table]
if self.A is not None:
etm['Polynomial'] = to_list(self.Linear.p.toDict())
etm['Periodic'] = to_list(self.Periodic.p.toDict())
etm['wrms'] = {'n': self.factor[0], 'e': self.factor[1], 'u': self.factor[2]}
etm['xyz_covariance'] = self.rotate_sig_cov(covar=self.covar).tolist()
etm['neu_covariance'] = self.covar.tolist()
if time_series:
ts = dict()
ts['t'] = np.array([self.soln.t.tolist(), self.soln.mjd.tolist()]).transpose().tolist()
ts['mjd'] = self.soln.mjd.tolist()
ts['x'] = self.soln.x.tolist()
ts['y'] = self.soln.y.tolist()
ts['z'] = self.soln.z.tolist()
ts['n'] = L[0].tolist()
ts['e'] = L[1].tolist()
ts['u'] = L[2].tolist()
ts['residuals'] = self.R.tolist()
ts['weights'] = self.P.transpose().tolist()
ts['model_neu'] = []
if model:
if self.A is not None:
for i in range(3):
ts['model_neu'].append((np.dot(self.As, self.C[i]).tolist()))
if self.A is not None:
ts['filter'] = np.logical_and(np.logical_and(self.F[0], self.F[1]), self.F[2]).tolist()
else:
ts['filter'] = []
etm['time_series'] = ts
return etm
def get_xyz_s(self, year, doy, jmp=None, sigma_h=SIGMA_FLOOR_H, sigma_v=SIGMA_FLOOR_V, force_model=False):
# this function find the requested epochs and returns an X Y Z and sigmas
# jmp = 'pre' returns the coordinate immediately before a jump
# jmp = 'post' returns the coordinate immediately after a jump
# jmp = None returns either the coordinate before or after, depending on the time of the jump.
# find this epoch in the t vector
date = pyDate.Date(year=year, doy=doy)
window = None
for jump in self.Jumps.table:
if jump.date == date and \
jump.p.jump_type in (GENERIC_JUMP, CO_SEISMIC_JUMP_DECAY, ANTENNA_CHANGE, CO_SEISMIC_JUMP) \
and jump.fit:
if np.sqrt(np.sum(np.square(jump.p.params[:, 0]))) > 0.02:
window = jump.date
# if no pre or post specified, then determine using the time of the jump
if jmp is None:
if (jump.date.datetime().hour + jump.date.datetime().minute / 60.0) < 12:
jmp = 'post'
else:
jmp = 'pre'
# use the previous or next date to get the APR
# if jmp == 'pre':
# date -= 1
# else:
# date += 1
index = np.where(self.soln.mjd == date.mjd)
index = index[0]
neu = np.zeros((3, 1))
L = self.l
ref_pos = np.array([self.soln.auto_x, self.soln.auto_y, self.soln.auto_z])
if index.size and self.A is not None:
# found a valid epoch in the t vector
# now see if this epoch was filtered
if np.all(self.F[:, index]) and force_model is False:
# the coordinate is good
xyz = L[:, index]
sig = self.R[:, index]
source = self.soln.stack_name.upper() + ' with ETM solution: good'
else:
# the coordinate is marked as bad
# get the requested epoch from the ETM
idt = np.argmin(np.abs(self.soln.ts - date.fyear))
for i in range(3):
neu[i] = np.dot(self.As[idt, :], self.C[i])
xyz = self.rotate_2xyz(neu) + ref_pos
# Use the deviation from the ETM multiplied by 2.5 to estimate the error
sig = 2.5 * self.R[:, index]
source = self.soln.stack_name.upper() + ' with ETM solution: filtered'
elif not index.size and self.A is not None:
# the coordinate doesn't exist, get it from the ETM
idt = np.argmin(np.abs(self.soln.ts - date.fyear))
source = 'No ' + self.soln.stack_name.upper() + ' solution: ETM'
for i in range(3):
neu[i] = np.dot(self.As[idt, :], self.C[i])
xyz = self.rotate_2xyz(neu) + ref_pos
# since there is no way to estimate the error,
# use the nominal sigma multiplied by 2.5
sig = 2.5 * self.factor[:, np.newaxis]
elif index.size and self.A is None:
# no ETM (too few points), but we have a solution for the requested day
xyz = L[:, index]
# set the uncertainties in NEU by hand
sig = np.array([[9.99], [9.99], [9.99]])
source = self.soln.stack_name.upper() + ' solution, no ETM'
else:
# no ETM (too few points) and no solution for this day, get average
source = 'No ' + self.soln.stack_name.upper() + ' solution, no ETM: mean coordinate'
xyz = np.mean(L, axis=1)[:, np.newaxis]
# set the uncertainties in NEU by hand
sig = np.array([[9.99], [9.99], [9.99]])
if self.A is not None:
# get the velocity of the site
if np.sqrt(np.square(self.Linear.p.params[0, 1]) +
np.square(self.Linear.p.params[1, 1]) +
np.square(self.Linear.p.params[2, 1])) > 0.2:
# fast moving station! bump up the sigma floor
sigma_h = 99.9
sigma_v = 99.9
source += '. fast moving station, bumping up sigmas'
# apply floor sigmas
sig = np.sqrt(np.square(sig) + np.square(np.array([[sigma_h], [sigma_h], [sigma_v]])))
return xyz, sig, window, source
def rotate_2neu(self, ecef):
return np.array(ct2lg(ecef[0], ecef[1], ecef[2], self.soln.lat, self.soln.lon))
def rotate_2xyz(self, neu):
return np.array(lg2ct(neu[0], neu[1], neu[2], self.soln.lat, self.soln.lon))
def rotate_sig_cov(self, sigmas=None, covar=None):
if sigmas is None and covar is None:
raise pyETMException('Error in rotate_sig_cov: must provide either sigmas or covariance matrix')
R = rotlg2ct(self.soln.lat, self.soln.lon)
if sigmas is not None:
# build a covariance matrix based on sigmas
sd = np.diagflat(np.square(sigmas))
sd[0, 1] = self.covar[0, 1]
sd[1, 0] = self.covar[1, 0]
sd[2, 1] = self.covar[2, 1]
sd[1, 2] = self.covar[1, 2]
sd[0, 2] = self.covar[0, 2]
sd[2, 0] = self.covar[2, 0]
# check that resulting matrix is PSD:
if not self.isPD(sd):
sd = self.nearestPD(sd)
sneu = np.dot(np.dot(R[:, :, 0], sd), R[:, :, 0].transpose())
dneu = np.sqrt(np.diag(sneu))
else:
# full covariance matrix given: rotate the whole matrix and return it
dneu = np.dot(np.dot(R[:, :, 0], covar), R[:, :, 0].transpose())
return dneu
def nearestPD(self, A):
"""Find the nearest positive-definite matrix to input
A Python/Numpy port of John D'Errico's `nearestSPD` MATLAB code [1], which
credits [2].
[1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
[2] N.J. Higham, "Computing a nearest symmetric positive semidefinite
matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
"""
B = (A + A.T) / 2
_, s, V = np.linalg.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if self.isPD(A3):
return A3
spacing = np.spacing(np.linalg.norm(A))
# The above is different from [1]. It appears that MATLAB's `chol` Cholesky
# decomposition will accept matrices with exactly 0-eigenvalue, whereas
# Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab
# for `np.spacing`), we use the above definition. CAVEAT: our `spacing`
# will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on
# the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas
# `spacing` will, for Gaussian random matrices of small dimension, be on
# the order of 1e-16. In practice, both ways converge.
I = np.eye(A.shape[0])
k = 1
while not self.isPD(A3):
mineig = np.min(np.real(np.linalg.eigvals(A3)))
A3 += I * (-mineig * k ** 2 + spacing)
k += 1
return A3
@staticmethod
def isPD(B):
"""Returns true when input is positive-definite, via Cholesky"""
try:
_ = np.linalg.cholesky(B)
return True
except np.linalg.LinAlgError:
return False
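# Illustrative sketch (not part of the module): repairing a slightly indefinite covariance with the
# two helpers above, assuming `etm` is an existing ETM instance (both methods only use numpy):
#   import numpy as np
#   C = np.array([[1.0, 0.99, 0.0], [0.99, 1.0, 0.99], [0.0, 0.99, 1.0]])
#   etm.isPD(C)                  # False: eigenvalue 1 - 0.99 * sqrt(2) is negative
#   etm.isPD(etm.nearestPD(C))   # True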
def load_parameters(self, params, l):
factor = 1
index = []
residuals = []
p = []
for param in params:
par = np.array(param['params'])
sig = np.array(param['sigmas'])
if param['object'] == 'polynomial':
self.Linear.load_parameters(par, sig, param['t_ref'])
if param['object'] == 'periodic':
self.Periodic.load_parameters(params=par, sigmas=sig)
if param['object'] == 'jump':
for jump in self.Jumps.table:
if jump.p.hash == param['hash']:
jump.load_parameters(params=par, sigmas=sig)
if param['object'] == 'var_factor':
# already a vector in the db
factor = par
x = self.Linear.p.params
s = self.Linear.p.sigmas
for jump in self.Jumps.table:
if jump.fit:
x = np.append(x, jump.p.params, axis=1)
s = np.append(s, jump.p.sigmas, axis=1)
x = np.append(x, self.Periodic.p.params, axis=1)
s = np.append(s, self.Periodic.p.sigmas, axis=1)
for i in range(3):
residuals.append(l[i] - np.dot(self.A(constrains=False), x[i, :]))
ss = np.abs(np.divide(residuals[i], factor[i]))
index.append(ss <= LIMIT)
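            # rebuild the observation weights with the same LIMIT-based
            # down-weighting scheme used in adjust_lsq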
f = np.ones((l.shape[1],))
sw = np.power(10, LIMIT - ss[ss > LIMIT])
            sw[sw < np.finfo(float).eps] = np.finfo(float).eps
f[ss > LIMIT] = sw
p.append(np.square(np.divide(f, factor[i])))
self.C = x
self.S = s
self.F = np.array(index)
self.R = np.array(residuals)
self.factor = factor
self.P = np.array(p)
def adjust_lsq(self, Ai, Li):
A = Ai(constrains=True)
L = Ai.get_l(Li, constrains=True)
cst_pass = False
iteration = 0
factor = 1
So = 1
dof = (Ai.shape[0] - Ai.shape[1])
X1 = chi2.ppf(1 - 0.05 / 2, dof)
X2 = chi2.ppf(0.05 / 2, dof)
s = np.array([])
v = np.array([])
C = np.array([])
P = Ai.get_p(constrains=True)
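        # Iteratively re-weighted least squares: solve the weighted system, test
        # the a-posteriori variance against the two-sided 95% chi-square bounds
        # (X1, X2) and, if the test fails, down-weight residuals beyond LIMIT
        # normalized sigmas, repeating until the test passes or 10 iterations.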
while not cst_pass and iteration <= 10:
W = np.sqrt(P)
Aw = np.multiply(W[:, None], A)
Lw = np.multiply(W, L)
C = np.linalg.lstsq(Aw, Lw, rcond=-1)[0]
v = L - np.dot(A, C)
# unit variance
So = np.sqrt(np.dot(v, np.multiply(P, v)) / dof)
x = np.power(So, 2) * dof
# obtain the overall uncertainty predicted by lsq
factor = factor * So
# calculate the normalized sigmas
s = np.abs(np.divide(v, factor))
if x < X2 or x > X1:
                # the a-posteriori variance did not pass the Chi2 test: re-weight and iterate
cst_pass = False
# reweigh by Mike's method of equal weight until 2 sigma
f = np.ones((v.shape[0], ))
# f[s > LIMIT] = 1. / (np.power(10, LIMIT - s[s > LIMIT]))
# do not allow sigmas > 100 m, which is basically not putting
# the observation in. Otherwise, due to a model problem
# (missing jump, etc) you end up with very unstable inversions
# f[f > 500] = 500
sw = np.power(10, LIMIT - s[s > LIMIT])
                sw[sw < np.finfo(float).eps] = np.finfo(float).eps
f[s > LIMIT] = sw
P = np.square(np.divide(f, factor))
else:
cst_pass = True
iteration += 1
# make sure there are no values below eps. Otherwise matrix becomes singular
        P[P < np.finfo(float).eps] = 1e-6
# some statistics
SS = np.linalg.inv(np.dot(A.transpose(), np.multiply(P[:, None], A)))
sigma = So*np.sqrt(np.diag(SS))
# mark observations with sigma <= LIMIT
index = Ai.remove_constrains(s <= LIMIT)
v = Ai.remove_constrains(v)
return C, sigma, index, v, factor, P
@staticmethod
def chi2inv(chi, df):
"""Return prob(chisq >= chi, with df degrees of
freedom).
df must be even.
"""
assert df & 1 == 0
# XXX If chi is very large, exp(-m) will underflow to 0.
m = chi / 2.0
sum = term = np.exp(-m)
for i in range(1, df // 2):
term *= m / i
sum += term
        # With small chi and large df, accumulated roundoff error, plus error
        # in the platform exp(), can cause this to spill a few ULP above 1.0.
        # For example, chi2P(100, 300) on my box has sum == 1.0 + 2.0**-52 at
        # this point. Returning a value even a teensy bit over 1.0 is no good.
        return min(sum, 1.0)
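    # e.g. chi2inv(3.0, 2) evaluates to exp(-1.5) ~= 0.223, the probability that
    # a chi-square variable with 2 degrees of freedom exceeds 3.0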
@staticmethod
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
log = file if hasattr(file, 'write') else sys.stderr
traceback.print_stack(file=log)
log.write(warnings.formatwarning(message, category, filename, lineno, line))
def get_outliers_list(self):
"""
        Function to obtain the outliers based on the ETM's sigmas
:return: a list containing the network code, station code and dates of the outliers in the time series
"""
filt = self.F[0] * self.F[1] * self.F[2]
dates = [pyDate.Date(mjd=mjd) for mjd in self.soln.mjd[~filt]]
return [(net, stn, date) for net, stn, date in zip(repeat(self.NetworkCode), repeat(self.StationCode), dates)]
class PPPETM(ETM):
def __init__(self, cnn, NetworkCode, StationCode, plotit=False, no_model=False, interseismic=None):
# load all the PPP coordinates available for this station
# exclude ppp solutions in the exclude table and any solution that is more than 100 meters from the auto coord
self.ppp_soln = PppSoln(cnn, NetworkCode, StationCode)
ETM.__init__(self, cnn, self.ppp_soln, no_model)
# no offset applied
self.L = np.array([self.soln.x,
self.soln.y,
self.soln.z])
# reduced to x y z coordinate of the station
self.l = self.rotate_2neu(np.array([self.ppp_soln.x - self.ppp_soln.auto_x,
self.ppp_soln.y - self.ppp_soln.auto_y,
self.ppp_soln.z - self.ppp_soln.auto_z]))
self.run_adjustment(cnn, self.l, plotit, self.ppp_soln)
# save the parameters to the db
# always save for PPP
self.save_parameters(cnn)
class GamitETM(ETM):
def __init__(self, cnn, NetworkCode, StationCode, plotit=False,
no_model=False, gamit_soln=None, stack_name=None, interseismic=None):
if gamit_soln is None:
self.polyhedrons = cnn.query_float('SELECT "X", "Y", "Z", "Year", "DOY" FROM stacks '
'WHERE "name" = \'%s\' AND "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\' '
'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"'
% (stack_name, NetworkCode, StationCode))
self.gamit_soln = GamitSoln(cnn, self.polyhedrons, NetworkCode, StationCode, stack_name)
else:
# load the GAMIT polyhedrons
self.gamit_soln = gamit_soln
ETM.__init__(self, cnn, self.gamit_soln, no_model, interseismic=interseismic)
# no offset applied
self.L = np.array([self.gamit_soln.x,
self.gamit_soln.y,
self.gamit_soln.z])
# reduced to x y z coordinate of the station
self.l = self.rotate_2neu(np.array([self.gamit_soln.x - self.gamit_soln.auto_x,
self.gamit_soln.y - self.gamit_soln.auto_y,
self.gamit_soln.z - self.gamit_soln.auto_z]))
if interseismic:
self.l -= self.Linear.interseismic
self.run_adjustment(cnn, self.l, plotit, self.gamit_soln)
# save parameters to db
# the object will also save parameters if the list object is invoked
self.save_parameters(cnn)
def get_etm_soln_list(self, use_ppp_model=False, cnn=None):
        # this function returns the values of the ETM ONLY
dict_o = []
if self.A is not None:
neu = []
if not use_ppp_model:
# get residuals from GAMIT solutions to GAMIT model
for i in range(3):
neu.append(np.dot(self.A, self.C[i]))
else:
# get residuals from GAMIT solutions to PPP model
etm = PPPETM(cnn, self.NetworkCode, self.StationCode)
# DDG: 20-SEP-2018 compare using MJD not FYEAR to avoid round off errors
index = np.isin(etm.soln.mjds, self.soln.mjd)
for i in range(3):
# use the etm object to obtain the design matrix that matches the dimensions of self.soln.t
neu.append(np.dot(etm.As[index, :], etm.C[i]))
del etm
rxyz = self.rotate_2xyz(np.array(neu)) + np.array([self.soln.auto_x, self.soln.auto_y, self.soln.auto_z])
dict_o += [(net_stn, x, y, z, year, doy, fyear)
for x, y, z, net_stn, year, doy, fyear in
zip(rxyz[0].tolist(), rxyz[1].tolist(), rxyz[2].tolist(),
repeat(self.NetworkCode + '.' + self.StationCode),
[date.year for date in self.gamit_soln.date],
[date.doy for date in self.gamit_soln.date],
[date.fyear for date in self.gamit_soln.date])]
else:
raise pyETMException_NoDesignMatrix('No design matrix available for %s.%s' %
(self.NetworkCode, self.StationCode))
return dict_o
class DailyRep(ETM):
def __init__(self, cnn, NetworkCode, StationCode, plotit=False,
no_model=False, gamit_soln=None, project=None):
if gamit_soln is None:
self.polyhedrons = cnn.query_float('SELECT "X", "Y", "Z", "Year", "DOY" FROM gamit_soln '
'WHERE "Project" = \'%s\' AND "NetworkCode" = \'%s\' AND '
'"StationCode" = \'%s\' '
'ORDER BY "Year", "DOY", "NetworkCode", "StationCode"'
% (project, NetworkCode, StationCode))
self.gamit_soln = GamitSoln(cnn, self.polyhedrons, NetworkCode, StationCode, project)
else:
# load the GAMIT polyhedrons
self.gamit_soln = gamit_soln
ETM.__init__(self, cnn, self.gamit_soln, no_model, False, False, False)
        # set the solution type to dra
self.soln.type = 'dra'
        # for daily repetitivities, rotate the full XYZ coordinates into NEU
self.l = self.rotate_2neu(np.array([self.gamit_soln.x,
self.gamit_soln.y,
self.gamit_soln.z]))
# for repetitivities, same vector for both
self.L = self.l
self.run_adjustment(cnn, self.l, plotit, self.gamit_soln)
# only save the excluded solutions in this module (DailyRep)
self.save_excluded_soln(cnn)
def get_residuals_dict(self):
        # this function returns the values of the ETM ONLY
dict_o = []
if self.A is not None:
neu = []
for i in range(3):
neu.append(np.dot(self.A, self.C[i]))
xyz = self.rotate_2xyz(np.array(neu)) + np.array([self.soln.auto_x, self.soln.auto_y, self.soln.auto_z])
rxyz = xyz - self.L
px = np.ones(self.P[0].shape)
py = np.ones(self.P[1].shape)
pz = np.ones(self.P[2].shape)
dict_o += [(net, stn, x, y, z, sigx, sigy, sigz, year, doy)
for x, y, z, sigx, sigy, sigz, net, stn, year, doy in
zip(rxyz[0].tolist(), rxyz[1].tolist(), rxyz[2].tolist(),
px.tolist(), py.tolist(), pz.tolist(),
repeat(self.NetworkCode), repeat(self.StationCode),
[date.year for date in self.gamit_soln.date],
[date.doy for date in self.gamit_soln.date])]
else:
raise pyETMException_NoDesignMatrix('No design matrix available for %s.%s' %
(self.NetworkCode, self.StationCode))
return dict_o
class FileETM(ETM):
def __init__(self, cnn, poly_list=None, plotit=False, no_model=False):
ETM.__init__(self, cnn, poly_list, no_model)
self.soln.type = 'file'
# no offset applied
self.L = np.array([self.soln.x,
self.soln.y,
self.soln.z])
# reduced to x y z coordinate of the station
self.l = self.rotate_2neu(np.array([self.soln.x - self.soln.auto_x,
self.soln.y - self.soln.auto_y,
self.soln.z - self.soln.auto_z]))
self.run_adjustment(cnn, self.l, plotit, poly_list)
| gpl-3.0 |
jlanga/exon_finder | tests/custom_assertions.py | 2 | 5708 | #!/usr/bin/env python3
"""
tests.custom_assertions.py: custom assertions for unit tests:
- assertEqualListOfSeqrecords: check if a list of seqrecords have:
- the same length
- the same id
- the same sequence
- assertEqualSpliceGraphs: check if two splice graphs:
- are isomorphic with nx.is_isomorphic
- each node have the same coordinates
- each edge have the same overlap
"""
from typing import List, Dict
import networkx as nx
import pandas as pd
from Bio.SeqRecord import SeqRecord
def check_same_keys(dict1: dict, dict2: dict) -> None:
"""Check if two dicts have the exact same keys"""
if set(dict1.keys()) != set(dict2.keys()):
raise KeyError("Keys differ: {keys1} {keys2}".format(
keys1=dict1.keys(), keys2=dict2.keys()
))
def check_same_values(dict1: dict, dict2: dict) -> None:
"""Check if two dicts have the same values"""
for key, value1 in dict1.items(): # Check same values
value2 = dict2[key]
if value1 != value2:
raise ValueError("{key1}: {value1} != {key2} : {value2}".format(
key1=key, value1=value1, key2=key, value2=value2
))
def check_same_dict(dict1: dict, dict2: dict) -> None:
"""Check if two dicts contain the exact same values"""
check_same_keys(dict1, dict2)
check_same_values(dict1, dict2)
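# e.g. check_same_dict({"a": 1}, {"a": 1}) passes silently, while a differing
# value raises ValueError and a differing key set raises KeyError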
def check_equal_node2coord(sg1: dict, sg2: dict) -> None:
"""Check if two splice graphs have the same node2coord dicts"""
node2coord1 = nx.get_node_attributes(G=sg1, name="coordinates")
node2coord2 = nx.get_node_attributes(G=sg2, name="coordinates")
check_same_dict(node2coord1, node2coord2)
def check_equal_edge2overlap(sg1: dict, sg2: dict) -> None:
"""Check if two splice graphs have the same node2coord dicts"""
edge2overlap1 = nx.get_edge_attributes(G=sg1, name="overlaps")
edge2overlap2 = nx.get_edge_attributes(G=sg2, name="overlaps")
check_same_dict(edge2overlap1, edge2overlap2)
def check_equal_df_dict_values(dict1: dict, dict2: dict) -> None:
"""Check if two data frames are equal
Solution: https://stackoverflow.com/a/33223893
"""
from numpy import array_equal
for key, df1 in dict1.items():
df2 = dict2[key]
if not array_equal(df1, df2):
raise ValueError("df1 != df2:\n{df1}\n{df2}".format(df1=df1, df2=df2))
def check_equal_splice_graphs(sg1: dict, sg2: dict) -> None:
"""Check if two splice graphs are:
- isomorphic
- node2coord are equal
- edge2overlaps are equal
"""
if not nx.is_isomorphic(sg1, sg2):
AssertionError("splicegraph are not isomorphic")
check_equal_node2coord(sg1, sg2)
check_equal_edge2overlap(sg1, sg2)
def check_equal_dict_of_sg(dict1: dict, dict2: dict) -> None:
"""Check if each key, element are equal splice graphs"""
check_same_keys(dict1, dict2)
for key, sg1 in dict1.items():
sg2 = dict2[key]
check_equal_splice_graphs(sg1, sg2)
def check_equal_length(iter1: List, iter2: List) -> None:
"""Check if two iterables have the same length"""
length_1 = len(iter1)
length_2 = len(iter2)
if length_1 != length_2:
raise AssertionError('Lengths differ: {len_1} != {len_2}'.format(
len_1=length_1, len_2=length_2
))
def check_equal_seqrecords(seqrecord1: SeqRecord, seqrecord2: SeqRecord) -> None:
"""Check if id and seq are equal"""
if seqrecord1.id != seqrecord2.id or seqrecord1.seq != seqrecord2.seq:
raise AssertionError(
'Records differ: {id1}: {seq1} {id2}: {seq2}'.format(
id1=seqrecord1.id, seq1=seqrecord1.seq, id2=seqrecord2.id, seq2=seqrecord2.seq
)
)
def check_equal_list_seqrecords(iter1: List[SeqRecord], iter2: List[SeqRecord]) -> None:
"""Check if a list of SeqRecords are equal"""
for i, _ in enumerate(iter1):
        check_equal_seqrecords(iter1[i], iter2[i])
class CustomAssertions:
"""
Custom assertions not covered in unittest:
- assertEqualListOfSeqrecords
"""
@classmethod
def assertEqualDict(self, dict1: dict, dict2: dict) -> None:
"""Check if two dicts are equal (values are compared with ==)"""
# pylint: disable=invalid-name, bad-classmethod-argument
check_same_dict(dict1, dict2)
@classmethod
def assertEqualListOfSeqrecords(
self, records1: List[SeqRecord], records2: List[SeqRecord]) -> None:
"""
Check if each element of list_of_seqrecords1 is exactly equal to each one of
list_of_seqrecords2.
"""
# pylint: disable=invalid-name, bad-classmethod-argument
check_equal_length(records1, records2)
check_equal_list_seqrecords(records1, records2)
@classmethod
def assertEqualSpliceGraphs(self, sg1: dict, sg2: dict) -> None:
"""Check if two splice graph are equal:"""
# pylint: disable=invalid-name,bad-classmethod-argument
check_equal_splice_graphs(sg1, sg2)
@classmethod
def assertEqualDictOfDF(
self, dict1: Dict[str, pd.DataFrame], dict2: Dict[str, pd.DataFrame]) -> None:
"""Check if two dicts of pd.DataFrame are equal"""
# pylint: disable=invalid-name,bad-classmethod-argument
check_same_keys(dict1, dict2)
check_equal_df_dict_values(dict1, dict2)
@classmethod
def assertEqualDictOfSpliceGraphs(self, dict1: dict, dict2: dict) -> None:
"""Check if two dicts of nx.DiGraph and some data attached to nodes and edges are equal"""
# pylint: disable=invalid-name, bad-classmethod-argument
check_equal_dict_of_sg(dict1, dict2)
| mit |
JuBzzz/PyImageScripts | Scripts/labeler.py | 1 | 13464 | import tkinter as tk
from tkinter import ttk
from PIL import ImageFont, ImageDraw, Image
from matplotlib import font_manager
from ._helper import *
import os
DEFAULT_FONTS = ["Arial", "Helvetica", "Times New Roman", "Times",
"Courier New", "Verdana"]
MIN_FONT = 1
MAX_FONT = 10000
WHITE = (255, 255, 255, 255)
def _validate_to_number_range(string_var, from_=0, to=255):
    text = string_var.get()
    if len(text):
        num = int(text)
        if num < from_:
            string_var.set(str(from_))
        elif num > to:
            string_var.set(str(to))
    else:
        string_var.set(str(from_))
    return string_var.get()
def get_fonts_from_dir(path=''):
fonts = {}
if os.path.isdir(path):
font_paths = font_manager.list_fonts(path, ['ttf'])
font_list = font_manager.createFontList(font_paths)
for font in font_list:
fonts[font.name] = font.fname
return fonts
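# e.g. get_fonts_from_dir(font_manager.win32FontDirectory()) is expected to map
# font family names to their .ttf file paths on a Windows system; an empty dict
# is returned when the given path is not a directory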
def make_label(image, bg, label_position, label_thickness):
if label_position in ["top", "bottom"]:
return Image.new('RGBA', (image.width, label_thickness), bg)
if label_position in ["left", "right"]:
return Image.new('RGBA', (label_thickness, image.height), bg)
def draw_text_on_label(label, text, font, text_color, v_align, h_align):
draw = ImageDraw.Draw(label)
text_size = draw.textsize(text, font=font)
if h_align == "left":
x = 0
if h_align == "center":
x = label.width // 2 - text_size[0] // 2
if h_align == "right":
x = label.width - text_size[0]
if v_align == "top":
y = 0
if v_align == "center":
y = label.height // 2 - text_size[1] // 2
if v_align == "bottom":
y = label.height - text_size[1]
draw.text((x, y), text=text, font=font, fill=text_color)
return draw
def compute_label_box_area(image, label, label_position):
if label_position == "top":
label_box = (0, 0, image.width, label.height)
if label_position == "bottom":
label_box = (0, image.height - label.height, image.width, image.height)
if label_position == "left":
label_box = (0, 0, label.width, image.height)
if label_position == "right":
label_box = (image.width - label.width, 0, image.width, image.height)
return label_box
def increase_image_canvas(image, label, label_position):
if label_position == "top":
labeled_image_size = (image.width, image.height + label.height)
image_box = (0, label.height, image.width, image.height + label.height)
if label_position == "left":
labeled_image_size = (image.width + label.width, image.height)
image_box = (label.width, 0, image.width + label.width, image.height)
if label_position == "bottom":
labeled_image_size = (image.width, image.height + label.height)
image_box = (0, 0, image.width, image.height)
if label_position == "right":
labeled_image_size = (image.width + label.width, image.height)
image_box = (0, 0, image.width, image.height)
labeled_image = Image.new(image.mode, labeled_image_size, WHITE)
labeled_image.paste(image, image_box)
return labeled_image
def run(image, text, font_size, font_file, text_color, bg, v_align, h_align,
label_thickness, label_position, label_over_image):
label = make_label(image, bg, label_position, label_thickness)
font = ImageFont.truetype(font_file, font_size)
draw = draw_text_on_label(label, text, font, text_color, v_align, h_align)
if not label_over_image:
image = increase_image_canvas(image, label, label_position)
label_box = compute_label_box_area(image, label, label_position)
image.paste(label, label_box, label)
return image
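# Illustrative call outside the GUI (the image path and font path below are
# assumptions, not values used by the widgets class):
#   img = Image.open("photo.png").convert("RGBA")
#   labeled = run(img, "My caption", 15, "C:/Windows/Fonts/arial.ttf",
#                 (0, 0, 0, 255), (255, 255, 255, 255), "center", "center",
#                 25, "bottom", False)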
class widgets(tk.Frame):
def __init__(self, parent):
super(widgets, self).__init__(parent, relief=RELIEF, bd=BD, padx=PAD,
pady=PAD, height=HEIGHT, width=WIDTH)
val_digit = (parent.register(digits_validation),
'%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
text_lbl = tk.Label(self, text="Text:")
text_lbl.grid(column=0, row=0, columnspan=12)
self.text_txt = tk.Text(self, height=3, width=35)
self.text_txt.grid(column=0, row=1, columnspan=12)
font_dir_lbl = tk.Label(self, text="Font\ndirectory: ")
font_dir_lbl.grid(column=0, row=2, columnspan=3)
self.font_directory = tk.StringVar(self)
font_dir_ent = tk.Entry(self, textvariable=self.font_directory)
font_dir_ent.grid(column=3, row=2, columnspan=6)
font_reset_btn = tk.Button(self, text="RESET",
command=self._load_default_fonts)
font_reset_btn.grid(column=8, row=2, columnspan=2)
font_dir_btn = tk.Button(self, text="OK", command=self._load_fonts)
font_dir_btn.grid(column=10, row=2, columnspan=2)
font_size_lbl = tk.Label(self, text="Size:")
font_size_lbl.grid(column=0, row=3, columnspan=2)
self.font_size = tk.StringVar(self, value="15")
font_size_ent = tk.Spinbox(self, textvariable=self.font_size, width=3,
from_=MIN_FONT, to=MAX_FONT, validate='key',
validatecommand=val_digit)
font_size_ent.grid(column=2, row=3, columnspan=2)
font_family_lbl = tk.Label(self, text="Font\nfamily: ")
font_family_lbl.grid(column=4, row=3, columnspan=3)
self.font_family_cmb = ttk.Combobox(self, state="readonly", width=16)
self.font_family_cmb.grid(column=7, row=3, columnspan=5)
self._load_default_fonts()
text_color_lbl = tk.Label(self, text="Text color:")
text_color_lbl.grid(column=0, row=4, columnspan=3)
text_color_frm = tk.Frame(self)
text_color_frm.grid(column=3, row=4, columnspan=9)
self.TEXT_RGBA = [tk.StringVar(self, value="0"),
tk.StringVar(self, value="0"),
tk.StringVar(self, value="0"),
tk.StringVar(self, value="255")]
text_red_lbl = tk.Label(text_color_frm, text="R:")
text_red_lbl.grid(column=0, row=0)
text_red = tk.Spinbox(text_color_frm, textvariable=self.TEXT_RGBA[0],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
text_red.grid(column=1, row=0)
text_green_lbl = tk.Label(text_color_frm, text="G:")
text_green_lbl.grid(column=2, row=0)
text_green = tk.Spinbox(text_color_frm, textvariable=self.TEXT_RGBA[1],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
text_green.grid(column=3, row=0)
text_blue_lbl = tk.Label(text_color_frm, text="B:")
text_blue_lbl.grid(column=4, row=0)
text_blue = tk.Spinbox(text_color_frm, textvariable=self.TEXT_RGBA[2],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
text_blue.grid(column=5, row=0)
text_alpha_lbl = tk.Label(text_color_frm, text="A:")
text_alpha_lbl.grid(column=6, row=0)
text_alpha = tk.Spinbox(text_color_frm, textvariable=self.TEXT_RGBA[3],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
text_alpha.grid(column=7, row=0)
v_align_lbl = tk.Label(self, text="Vertical\nalign:")
v_align_lbl.grid(column=0, row=5, columnspan=3)
self.v_align_cmb = ttk.Combobox(self, width=6, state="readonly",
values=["top", "center", "bottom"])
self.v_align_cmb.grid(column=3, row=5, columnspan=3)
self.v_align_cmb.set("top")
h_align_lbl = tk.Label(self, text="Horizontal\nalign:")
h_align_lbl.grid(column=6, row=5, columnspan=3)
self.h_align_cmb = ttk.Combobox(self, width=6, state="readonly",
values=["left", "center", "right"])
self.h_align_cmb.grid(column=9, row=5, columnspan=3)
self.h_align_cmb.set("left")
bg_color_lbl = tk.Label(self, text="Background\ncolor:")
bg_color_lbl.grid(column=0, row=6, columnspan=3)
bg_color_frm = tk.Frame(self)
bg_color_frm.grid(column=3, row=6, columnspan=9)
self.BG_RGBA = [tk.StringVar(self, value="255"),
tk.StringVar(self, value="255"),
tk.StringVar(self, value="255"),
tk.StringVar(self, value="255")]
bg_red_lbl = tk.Label(bg_color_frm, text="R:")
bg_red_lbl.grid(column=0, row=0)
bg_red = tk.Spinbox(bg_color_frm, textvariable=self.BG_RGBA[0],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
bg_red.grid(column=1, row=0)
bg_green_lbl = tk.Label(bg_color_frm, text="G:")
bg_green_lbl.grid(column=2, row=0)
bg_green = tk.Spinbox(bg_color_frm, textvariable=self.BG_RGBA[1],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
bg_green.grid(column=3, row=0)
bg_blue_lbl = tk.Label(bg_color_frm, text="B:")
bg_blue_lbl.grid(column=4, row=0)
bg_blue = tk.Spinbox(bg_color_frm, textvariable=self.BG_RGBA[2],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
bg_blue.grid(column=5, row=0)
bg_alpha_lbl = tk.Label(bg_color_frm, text="A:")
bg_alpha_lbl.grid(column=6, row=0)
bg_alpha = tk.Spinbox(bg_color_frm, textvariable=self.BG_RGBA[3],
width=3, from_=0, to=255, validate='key',
validatecommand=val_digit)
bg_alpha.grid(column=7, row=0)
label_thickness_lbl = tk.Label(self, text="Label\nthickness:")
label_thickness_lbl.grid(column=0, row=7, columnspan=3)
self.label_thickness = tk.StringVar(self, value="25")
label_thick_ent = tk.Spinbox(self, textvariable=self.label_thickness,
width=3, from_=MIN_FONT, to=MAX_FONT,
validate='key', validatecommand=val_digit)
label_thick_ent.grid(column=3, row=7, columnspan=2)
label_position_lbl = tk.Label(self, text="Label\nposition:")
label_position_lbl.grid(column=5, row=7, columnspan=3)
label_position_list = ["top", "bottom", "left", "right"]
self.label_position_cmb = ttk.Combobox(self, state="readonly", width=8,
values=label_position_list)
self.label_position_cmb.grid(column=8, row=7, columnspan=4)
self.label_position_cmb.set("bottom")
self.label_over_image = tk.IntVar()
label_over_image_chk = tk.Checkbutton(self, text="Label over\nimage",
variable=self.label_over_image)
label_over_image_chk.grid(column=0, row=8, columnspan=3)
def _load_fonts(self):
self.font_dict = get_fonts_from_dir(self.font_directory.get())
sorted_font_list = sorted(list(self.font_dict.keys()))
self.font_family_cmb['values'] = sorted_font_list
available_fonts = self.font_dict.keys()
for def_font in DEFAULT_FONTS:
current_font = self.font_family_cmb.get()
if current_font == "" and def_font in available_fonts:
self.font_family_cmb.set(def_font)
else:
if current_font == "":
self.font_family_cmb.set(list(available_fonts)[0])
def _load_default_fonts(self):
self.font_directory.set(font_manager.win32FontDirectory())
self._load_fonts()
def get_args(self):
text = self.text_txt.get("1.0",'end-1c')
font_size = int(_validate_to_number_range(self.font_size,
MIN_FONT, MAX_FONT))
font_file = self.font_dict[self.font_family_cmb.get()]
text_color = [int(_validate_to_number_range(value))
for value in self.TEXT_RGBA]
bg_color = [int(_validate_to_number_range(value))
for value in self.BG_RGBA]
v_align = self.v_align_cmb.get()
h_align = self.h_align_cmb.get()
label_thickness = int(_validate_to_number_range(self.label_thickness,
MIN_FONT, MAX_FONT))
label_position = self.label_position_cmb.get()
label_over_image = self.label_over_image.get()
## TODO: Allow the user to choose a different directory to load the
# font files from
return {"text": text,
"font_size": font_size,
"font_file": font_file,
"text_color": tuple(text_color),
"bg": tuple(bg_color),
"v_align": v_align,
"h_align": h_align,
"label_thickness": label_thickness,
"label_position": label_position,
"label_over_image": label_over_image
}
| mit |
deepesch/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When repeatedly working on
    the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence it is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
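    Examples
    --------
    A minimal, illustrative call (the output path below is an assumption)::
        import numpy as np
        from sklearn.datasets import dump_svmlight_file
        X = np.array([[1.0, 0.0, 2.5], [0.0, 3.0, 0.0]])
        y = np.array([0, 1])
        dump_svmlight_file(X, y, "/tmp/data.svmlight", zero_based=True)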
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| bsd-3-clause |