repo_name (string, 7-79 chars) | path (string, 4-179 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 959-798k chars) | license (string, 15 classes)
---|---|---|---|---|---|
mementum/backtrader | backtrader/plot/utils.py | 1 | 2920 |
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from colorsys import rgb_to_hls as rgb2hls, hls_to_rgb as hls2rgb
import matplotlib.colors as mplcolors
import matplotlib.path as mplpath
def tag_box_style(x0, y0, width, height, mutation_size, mutation_aspect=1):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
- *mutation_aspect* : aspect ratio for the mutation.
"""
# note that we are ignoring mutation_aspect. This is okay in general.
mypad = 0.2
pad = mutation_size * mypad
# width and height with padding added.
width, height = width + 2.*pad, height + 2.*pad,
# boundary of the padded box
x0, y0 = x0-pad, y0-pad,
x1, y1 = x0+width, y0 + height
cp = [(x0, y0),
(x1, y0), (x1, y1), (x0, y1),
(x0-pad, (y0+y1)/2.), (x0, y0),
(x0, y0)]
com = [mplpath.Path.MOVETO,
mplpath.Path.LINETO, mplpath.Path.LINETO, mplpath.Path.LINETO,
mplpath.Path.LINETO, mplpath.Path.LINETO,
mplpath.Path.CLOSEPOLY]
path = mplpath.Path(cp, com)
return path
def shade_color(color, percent):
"""Shade Color
This color utility function allows the user to easily darken or
lighten a color for plotting purposes.
Parameters
----------
color : string, list, hexvalue
Any acceptable Matplotlib color value, such as
'red', 'slategrey', '#FFEE11', (1,0,0)
percent : the amount by which to brighten or darken the color.
Returns
-------
color : tuple of floats
tuple representing converted rgb values
"""
rgb = mplcolors.colorConverter.to_rgb(color)
h, l, s = rgb2hls(*rgb)
l *= 1 + float(percent)/100
l = min(1, l)
l = max(0, l)
r, g, b = hls2rgb(h, l, s)
return r, g, b
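if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of backtrader). shade_color
    # lightens with a positive percent and darkens with a negative one; the tag
    # box is used as a custom boxstyle, assuming a matplotlib version that
    # accepts a plain callable here (as in matplotlib's custom box styles demo).
    import matplotlib.pyplot as plt
    print(shade_color('red', 30))    # lighter red as an (r, g, b) tuple
    print(shade_color('red', -30))   # darker red
    fig, ax = plt.subplots()
    ax.text(0.5, 0.5, 'tagged', ha='center', va='center',
            bbox=dict(boxstyle=tag_box_style, alpha=0.3))
    plt.show()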
| gpl-3.0 |
percyfal/snakemakelib-core | snakemakelib/applications/rnaseq.py | 1 | 3292 |
# Copyright (C) 2015 by Per Unneberg
import numpy as np
import pandas as pd
import math
from snakemakelib.log import LoggerManager
from snakemakelib.odo.utils import annotate_df
from snakemakelib.odo import rpkmforgenes, rsem
logger = LoggerManager().getLogger(__name__)
__all__ = ['number_of_detected_genes', 'estimate_size_factors_for_matrix', 'summarize_expression_data']
def number_of_detected_genes(expr_long, cutoff=1.0, quantification="TPM", **kwargs):
"""Aggregate expression data frame to count number of detected genes
Args:
expr_long (DataFrame): pandas data frame with expression values in long format
cutoff (float): cutoff for detected gene
quantification (str): quantification label, TPM or FPKM
Returns:
detected_genes (DataFrame): aggregated data frame with the number of detected genes per sample
"""
try:
detected_genes = expr_long.groupby(kwargs.get("groupby", 'SM')).agg(lambda x: sum(x > cutoff))
except Exception as e:
logger.warning("Failed to group genes by sample: %s", e)
detected_genes = None
return detected_genes
def _gene_name_map_from_gtf(gtf, unit_id, unit_name):
"""Get a mapping from gene_id to gene_name"""
mapping = {}
for feature in gtf[8]:
tmp = {k.replace("\"", ""):v.replace("\"", "") for k, v in [x.split(" ") for x in feature.split("; ")]}
mapping[tmp.get(unit_id, "")] = tmp.get(unit_name, tmp.get(unit_id, ""))
return mapping
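# Illustrative sketch of the attribute parsing above (hypothetical GTF row):
#   feature = 'gene_id "ENSG000001"; gene_name "TP53"'
#   -> tmp == {'gene_id': 'ENSG000001', 'gene_name': 'TP53'}
#   -> mapping['ENSG000001'] == 'TP53'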
def read_gene_expression(infile, annotation=None, unit_id="gene_id",
unit_name="gene_name"):
"""Read gene expression file, renaming genes if annotation present.
NB: currently assumes annotation file is in gtf format and that
gene expression levels, not transcript, are used
Args:
infile (str): infile name
annotation (str): annotation file, gtf format
unit_id (str): id of measurement unit; gene_id or transcript_id
unit_name (str): name of measurement unit, as defined by annotation file
Returns:
expr (DataFrame): (possibly annotated) data frame
"""
expr = pd.read_csv(infile)
if annotation:
annot = pd.read_table(annotation, header=None)
mapping = _gene_name_map_from_gtf(annot, unit_id, unit_name)
expr[unit_name] = expr[unit_id].map(mapping.get)
return expr
def summarize_expression_data(targets, outfile, parser, groupnames=["SM"]):
"""Summarize several expression result files and save as csv output file"""
dflist = [annotate_df(t, parser, groupnames=groupnames) for t in targets]
df_long = pd.concat(dflist)
df_long.to_csv(outfile)
def estimate_size_factors_for_matrix(counts, locfunc=np.median):
"""Estimate size factors from count data frame.
See bioconductor:DEseq2::estimateSizeFactorsForMatrix for original
R implementation.
Args:
counts (DataFrame): counts data frame in wide format
locfunc (func): location function
Returns:
sizes (Series): size factors for groups
"""
loggeomeans = counts.apply(np.log, axis=1).mean(axis=1)
finite = loggeomeans.apply(np.isfinite)
factors = counts.apply(np.log, axis=1).apply(lambda x: np.exp( locfunc ((x - loggeomeans).loc[finite])))
return factors
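if __name__ == "__main__":
    # Minimal usage sketch (illustrative data, not part of snakemakelib): a
    # wide counts matrix with samples as columns. S2 has twice the sequencing
    # depth of S1, so its size factor should be about twice as large
    # (roughly 0.71 vs 1.41).
    toy_counts = pd.DataFrame({"S1": [10, 20, 30], "S2": [20, 40, 60]},
                              index=["geneA", "geneB", "geneC"])
    print(estimate_size_factors_for_matrix(toy_counts))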
| mit |
ryfeus/lambda-packs | Skimage_numpy/source/numpy/doc/creation.py | 118 | 5507 |
"""
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)])  # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
For example: ::
>>> np.zeros((2, 3))
array([[ 0.,  0.,  0.],
       [ 0.,  0.,  0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
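As a minimal sketch (``simple.csv`` is a hypothetical file of comma-separated
numbers), numpy itself can also parse delimited text directly: ::
>>> x = np.loadtxt('simple.csv', delimiter=',')
>>> x = np.genfromtxt('simple.csv', delimiter=',', skip_header=1)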
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that is certainly much more work and requires significantly more advanced
knowledge to interface with C or C++.
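A minimal sketch of the round trip (``data.bin`` is an illustrative file name;
the dtype must be given explicitly when reading back): ::
>>> a = np.arange(4, dtype=np.float64)
>>> a.tofile('data.bin')
>>> np.fromfile('data.bin', dtype=np.float64)
array([ 0.,  1.,  2.,  3.])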
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common are the many
array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
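For example (a brief sketch; the random values depend on the seed): ::
>>> np.random.seed(0)
>>> x = np.random.rand(2, 2)     # uniform random values in [0, 1)
>>> np.diag([1, 2, 3])           # a special matrix: diagonal
array([[1, 0, 0],
       [0, 2, 0],
       [0, 0, 3]])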
"""
from __future__ import division, absolute_import, print_function
| mit |
DistrictDataLabs/yellowbrick | tests/test_style/test_palettes.py | 1 | 9781 |
# tests.test_style.test_palettes
# Tests the palettes module of the yellowbrick library.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Tue Oct 04 16:21:58 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_palettes.py [c6aff34] [email protected] $
"""
Tests the palettes module of the yellowbrick library.
"""
##########################################################################
## Imports
##########################################################################
import pytest
import numpy as np
import matplotlib as mpl
from yellowbrick.exceptions import *
from yellowbrick.style.palettes import *
from yellowbrick.style.colors import get_color_cycle
from yellowbrick.style.rcmod import set_aesthetic, set_palette
from yellowbrick.style.palettes import color_sequence, color_palette
from yellowbrick.style.palettes import ColorPalette, PALETTES, SEQUENCES
from tests.base import VisualTestCase
##########################################################################
## Color Palette Tests
##########################################################################
class TestColorPaletteObject(VisualTestCase):
"""
Tests the ColorPalette object
"""
def test_init_palette_by_name(self):
"""
Test that a palette can be initialized by name
"""
# Try all the names in the palettes
for name, value in PALETTES.items():
try:
palette = ColorPalette(name)
except YellowbrickValueError:
self.fail("Could not instantiate {} color palette by name".format(name))
assert value == palette
# Try a name not in PALETTES
with pytest.raises(YellowbrickValueError):
assert (
"foo" not in PALETTES
), "Cannot test bad name 'foo' it is in PALETTES!"
palette = ColorPalette("foo")
def test_init_palette_by_list(self):
"""
Test that a palette can be initialized by a list
"""
# Try all the values in the palettes (HEX)
for value in PALETTES.values():
palette = ColorPalette(value)
assert len(value) == len(palette)
# Try all the values converted to RGB
for value in PALETTES.values():
palette = ColorPalette(map(mpl.colors.colorConverter.to_rgb, value))
assert len(value) == len(palette)
def test_color_palette_context(self):
"""
Test ColorPalette context management
"""
default = color_palette()
context = color_palette("dark")
with ColorPalette("dark") as palette:
assert isinstance(palette, ColorPalette)
assert get_color_cycle() == context
assert get_color_cycle() == default
def test_as_hex_as_rgb(self):
"""
Test the conversion of a ColorPalette to hex values and back to rgb
"""
palette = color_palette("flatui")
expected = PALETTES["flatui"]
morgified = palette.as_hex()
assert morgified is not palette
assert isinstance(morgified, ColorPalette)
assert morgified == expected
remorgified = morgified.as_rgb()
assert remorgified is not morgified
assert remorgified is not palette
assert remorgified == palette
@pytest.mark.skip(reason="not implemented yet")
def test_plot_color_palette(self):
"""
Test the plotting of a color palette for color visualization
"""
raise NotImplementedError("Not quite sure how to implement this yet")
class TestColorPaletteFunction(VisualTestCase):
"""
Tests the color_palette function.
"""
def test_current_palette(self):
"""
Test modifying the current palette with a simple palette
"""
pal = color_palette(["red", "blue", "green"], 3)
set_palette(pal, 3)
assert pal == get_color_cycle()
# Reset the palette
set_aesthetic()
def test_palette_context(self):
"""
Test the context manager for the color_palette function
"""
default_pal = color_palette()
context_pal = color_palette("muted")
with color_palette(context_pal):
assert get_color_cycle() == context_pal
assert get_color_cycle() == default_pal
def test_big_palette_context(self):
"""
Test that the context manager also resets the number of colors
"""
original_pal = color_palette("accent", n_colors=8)
context_pal = color_palette("bold", 10)
set_palette(original_pal)
with color_palette(context_pal, 10):
assert get_color_cycle() == context_pal
assert get_color_cycle() == original_pal
# Reset default
set_aesthetic()
def test_yellowbrick_palettes(self):
"""
Test the yellowbrick palettes have length 6 (bgrmyck)
"""
pals = ["accent", "dark", "pastel", "bold", "muted"]
for name in pals:
pal_out = color_palette(name)
assert len(pal_out) == 6, "{} is not of len 6".format(name)
def test_seaborn_palettes(self):
"""
Test the seaborn palettes have length 6 (bgrmyck)
"""
pals = [
"sns_deep",
"sns_muted",
"sns_pastel",
"sns_bright",
"sns_dark",
"sns_colorblind",
]
for name in pals:
pal_out = color_palette(name)
assert len(pal_out) == 6
def test_other_palettes(self):
"""
Test that the other palettes exist
"""
pals = ["flatui", "paired", "neural_paint", "set1"]
for name in pals:
pal_out = color_palette(name)
assert pal_out is not None
assert len(pal_out) > 0
def test_bad_palette_name(self):
"""
Test that a bad palette name raises an exception
"""
with pytest.raises(ValueError):
color_palette("IAmNotAPalette")
with pytest.raises(YellowbrickValueError):
color_palette("IAmNotAPalette")
def test_bad_palette_colors(self):
"""
Test that bad color names raise an exception
"""
pal = ["red", "blue", "iamnotacolor"]
with pytest.raises(ValueError):
color_palette(pal)
with pytest.raises(YellowbrickValueError):
color_palette(pal)
def test_palette_is_list_of_tuples(self):
"""
Assert that color_palette returns a list of RGB tuples
"""
pal_in = np.array(["red", "blue", "green"])
pal_out = color_palette(pal_in, 3)
assert isinstance(pal_out, list)
assert isinstance(pal_out[0], tuple)
assert isinstance(pal_out[0][0], float)
assert len(pal_out[0]) == 3
def test_palette_cycles(self):
"""
Test that the color palette cycles for more colors
"""
accent = color_palette("accent")
double_accent = color_palette("accent", 12)
assert double_accent == accent + accent
@pytest.mark.skip(reason="discovered this commented out, don't know why")
def test_cbrewer_qual(self):
"""
Test colorbrewer qualitative palettes
"""
pal_short = mpl_palette("Set1", 4)
pal_long = mpl_palette("Set1", 6)
assert pal_short == pal_long[:4]
pal_full = palettes.mpl_palette("Set2", 8)
pal_long = palettes.mpl_palette("Set2", 10)
assert pal_full == pal_long[:8]
def test_color_codes(self):
"""
Test the setting of color codes
"""
set_color_codes("accent")
colors = color_palette("accent") + ["0.06666666666666667"]
for code, color in zip("bgrmyck", colors):
rgb_want = mpl.colors.colorConverter.to_rgb(color)
rgb_got = mpl.colors.colorConverter.to_rgb(code)
assert rgb_want == rgb_got
set_color_codes("reset")
def test_as_hex(self):
"""
Test converting a color palette to hex and back to rgb.
"""
pal = color_palette("accent")
for rgb, hex in zip(pal, pal.as_hex()):
assert mpl.colors.rgb2hex(rgb) == hex
for rgb_e, rgb_v in zip(pal, pal.as_hex().as_rgb()):
assert rgb_e == rgb_v
def test_preserved_palette_length(self):
"""
Test palette length is preserved when modified
"""
pal_in = color_palette("Set1", 10)
pal_out = color_palette(pal_in)
assert pal_in == pal_out
def test_color_sequence(self):
"""
Ensure the color sequence returns listed colors.
"""
for name, ncols in SEQUENCES.items():
for n in ncols.keys():
cmap = color_sequence(name, n)
assert name == cmap.name
assert n == cmap.N
def test_color_sequence_default(self):
"""
Assert the default color sequence is RdBu
"""
cmap = color_sequence()
assert cmap.name == "RdBu"
assert cmap.N == 11
def test_color_sequence_unrecognized(self):
"""
Test value errors for unrecognized sequences
"""
with pytest.raises(YellowbrickValueError):
color_sequence("PepperBucks", 3)
def test_color_sequence_bounds(self):
"""
Test color sequence out of bounds value error
"""
with pytest.raises(YellowbrickValueError):
color_sequence("RdBu", 18)
with pytest.raises(YellowbrickValueError):
color_sequence("RdBu", 2)
| apache-2.0 |
jakobworldpeace/scikit-learn | sklearn/datasets/tests/test_lfw.py | 42 | 7253 |
"""This test for the LFW require medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA,
download_if_missing=False)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3,
download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, resize=None,
slice_=None, color=True,
download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100,
download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA,
download_if_missing=False)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, resize=None,
slice_=None, color=True,
download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
durandtibo/mantra-python | demo/demo.py | 1 | 3839 |
import csv
import profile
import time
import numpy as np
from mantra import (BagReader, Evaluation, LabeledObject, MantraWithSGD,
MantraWithSSG, MultiClassMantraModel4Bag,
MultiClassMultiInstanceMantraModel4Bag, Preprocessing, RankingAPMantraModel4Bag, RankingUtils)
#from sklearn.preprocessing import normalize
class DatasetDemo:
@staticmethod
def read_dataset_csv(filename, verbose=False):
""" return the list of images and labels """
list_data = list()
print('read', filename)
with open(filename, newline='') as f:
reader = csv.reader(f)
rownum = 0
for row in reader:
if rownum == 0:
header = row
else:
name = row[0]
label = int(row[1])
example = LabeledObject(name, label)
list_data.append(example)
rownum += 1
if verbose:
print('read %d examples' % len(list_data))
return list(list_data)
def demo_mantra_multiclass():
print('\n**************************')
print('* Demo MANTRA multiclass *')
print('**************************')
# path to the data
path_data = "/Users/thibautdurand/Desktop/data/json/uiuc"
filename_train = path_data + "/train.csv"
filename_test = path_data + "/test.csv"
# Read train data
# Read image name and labels
list_data = DatasetDemo.read_dataset_csv(filename_train, True)
# Read bags
data = BagReader.read_data_json(list_data, path_data, True)
# Preprocess the data
train = Preprocessing.normalize_bag(data)
# Read test data
list_data = DatasetDemo.read_dataset_csv(filename_test, True)
data = BagReader.read_data_json(list_data, path_data, True)
test = Preprocessing.normalize_bag(data)
# Define model
model = MultiClassMantraModel4Bag()
# Define solver
solver = MantraWithSGD(num_epochs=50, lambdaa=1e-4)
# Learn model
solver.optimize(model, train)
# Evaluate performance on train data
prediction_and_labels = model.compute_prediction_and_labels(train)
Evaluation.multiclass_accuracy(prediction_and_labels)
# Evaluate performance on test data
prediction_and_labels = model.compute_prediction_and_labels(test)
Evaluation.multiclass_accuracy(prediction_and_labels)
def demo_mantra_ranking():
print('\n***********************')
print('* Demo MANTRA ranking *')
print('***********************')
# path to the data
path_data = "/Users/thibautdurand/Desktop/data/json/uiuc"
filename_train = path_data + "/train.csv"
filename_test = path_data + "/test.csv"
# Read train data
# Read image name and labels
list_data = DatasetDemo.read_dataset_csv(filename_train, True)
# Read bags
data = BagReader.read_data_json(list_data, path_data, True)
# Preprocess the data
train = Preprocessing.normalize_bag(data)
# Define the positive and negative examples
for example in train:
if example.label == 2:
example.label = 1
else:
example.label = 0
# Read test data
# Read image name and labels
list_data = DatasetDemo.read_dataset_csv(filename_test, True)
# Read bags
data = BagReader.read_data_json(list_data, path_data, True)
# Preprocess the data
test = Preprocessing.normalize_bag(data)
# Define the positive and negative examples
for example in test:
if example.label == 2:
example.label = 1
else:
example.label = 0
# Generate ranking example
train_rank = RankingUtils.generate_ranking_example(train)
# Define model
model = RankingAPMantraModel4Bag()
# Define solver
solver = MantraWithSGD(num_epochs=50)
# Learn model
solver.optimize(model, train_rank)
# Evaluate performance on train data
scores_and_labels = model.compute_scores_and_labels(train)
Evaluation.average_precision(scores_and_labels)
# Evaluate performance on test data
scores_and_labels = model.compute_scores_and_labels(test)
Evaluation.average_precision(scores_and_labels)
if __name__ == "__main__":
demo_mantra_multiclass()
demo_mantra_ranking()
| mit |
DonBeo/statsmodels | statsmodels/datasets/copper/data.py | 28 | 2316 |
"""World Copper Prices 1951-1975 dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = "World Copper Market 1951-1975 Dataset"
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """World Copper Market 1951-1975"""
DESCRLONG = """This data describes the world copper market from 1951 through 1975. In an
example in Gill, the outcome variable (of a two-stage estimation) is the world
consumption of copper for the 25 years. The explanatory variables are the
world consumption of copper in 1000 metric tons, the constant dollar adjusted
price of copper, the price of a substitute, aluminum, an index of real per
capita income base 1970, an annual measure of manufacturer inventory change,
and a time trend.
"""
NOTE = """
Number of Observations - 25
Number of Variables - 6
Variable name definitions::
WORLDCONSUMPTION - World consumption of copper (in 1000 metric tons)
COPPERPRICE - Constant dollar adjusted price of copper
INCOMEINDEX - An index of real per capita income (base 1970)
ALUMPRICE - The price of aluminum
INVENTORYINDEX - A measure of annual manufacturer inventory trend
TIME - A time trend
Years are included in the data file though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the copper data and returns a Dataset class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/copper.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6))
return data
def load_pandas():
"""
Load the copper data and returns a Dataset class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
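if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of statsmodels): load the
    # data and inspect the endogenous/exogenous shapes (25 observations,
    # one outcome and five explanatory variables).
    dataset = load_pandas()
    print(dataset.endog.shape, dataset.exog.shape)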
| bsd-3-clause |
ahoyosid/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 |
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
Northrend/mxnet | example/gluon/dcgan.py | 30 | 8796 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import argparse
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
import numpy as np
import logging
from datetime import datetime
import os
import time
def fill_buf(buf, i, img, shape):
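# Copy the i-th image into its cell of a square grid buffer so that a whole
# batch can be visualized as a single tiled image.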
n = buf.shape[0]//shape[1]
m = buf.shape[1]//shape[0]
sx = (i%m)*shape[0]
sy = (i//m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
return None
def visual(title, X, name):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = buff[:,:,::-1]
plt.imshow(buff)
plt.title(title)
plt.savefig(name)
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use. options are cifar10 and imagenet.')
parser.add_argument('--batch-size', type=int, default=64, help='input batch size')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='./results', help='folder to output images and model checkpoints')
parser.add_argument('--check-point', default=True, help="save results at each epoch or not")
opt = parser.parse_args()
print(opt)
logging.basicConfig(level=logging.DEBUG)
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
if opt.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
check_point = bool(opt.check_point)
outf = opt.outf
if not os.path.exists(outf):
os.makedirs(outf)
def transformer(data, label):
# resize to 64x64
data = mx.image.imresize(data, 64, 64)
# transpose from (64, 64, 3) to (3, 64, 64)
data = mx.nd.transpose(data, (2,0,1))
# normalize to [-1, 1]
data = data.astype(np.float32)/128 - 1
# if image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = mx.nd.tile(data, (3, 1, 1))
return data, label
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size=opt.batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size=opt.batch_size, shuffle=False)
# build the generator
netG = nn.Sequential()
with netG.name_scope():
# input is Z, going into a convolution
netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 4 x 4
netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 8 x 8
netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 16 x 16
netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 32 x 32
netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
netG.add(nn.Activation('tanh'))
# state size. (nc) x 64 x 64
# build the discriminator
netD = nn.Sequential()
with netD.name_scope():
# input is (nc) x 64 x 64
netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 32 x 32
netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 16 x 16
netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 8 x 8
netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 4 x 4
netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
# loss
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# initialize the generator and the discriminator
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
# trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
# ============printing==============
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)
metric = mx.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
iter = 0
for epoch in range(opt.nepoch):
tic = time.time()
btic = time.time()
for data, _ in train_data:
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real_t
data = data.as_in_context(ctx)
noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)
with autograd.record():
output = netD(data)
output = output.reshape((opt.batch_size, 2))
errD_real = loss(output, real_label)
metric.update([real_label,], [output,])
fake = netG(noise)
output = netD(fake.detach())
output = output.reshape((opt.batch_size, 2))
errD_fake = loss(output, fake_label)
errD = errD_real + errD_fake
errD.backward()
metric.update([fake_label,], [output,])
trainerD.step(opt.batch_size)
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
with autograd.record():
output = netD(fake)
output = output.reshape((-1, 2))
errG = loss(output, real_label)
errG.backward()
trainerG.step(opt.batch_size)
name, acc = metric.get()
# logging.info('speed: {} samples/s'.format(opt.batch_size / (time.time() - btic)))
logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d' %(mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch))
if iter % 1 == 0:
visual('gout', fake.asnumpy(), name=os.path.join(outf,'fake_img_iter_%d.png' %iter))
visual('data', data.asnumpy(), name=os.path.join(outf,'real_img_iter_%d.png' %iter))
iter = iter + 1
btic = time.time()
name, acc = metric.get()
metric.reset()
logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
logging.info('time: %f' % (time.time() - tic))
if check_point:
netG.save_params(os.path.join(outf,'generator_epoch_%d.params' %epoch))
netD.save_params(os.path.join(outf,'discriminator_epoch_%d.params' % epoch))
netG.save_params(os.path.join(outf, 'generator.params'))
netD.save_params(os.path.join(outf, 'discriminator.params'))
| apache-2.0 |
jpinedaf/pyspeckit | pyspeckit/spectrum/models/modelgrid.py | 5 | 2036 |
"""
==========
Model Grid
==========
Fit a line based on parameters output from a grid of models
Module API
^^^^^^^^^^
"""
import numpy as np
from pyspeckit.mpfit import mpfit
import matplotlib.cbook as mpcb
import copy
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
def gaussian_line(xax, maxamp, tau, offset, width):
"""
A Gaussian line function in which the peak amplitude is scaled by (1 - exp(-tau)).
"""
return np.exp(-(xax-offset)**2/(2.0*width**2)) * maxamp * (1.0-np.exp(-1*tau))
def line_params_2D(gridval1, gridval2, griddim1, griddim2, valuegrid):
"""
Given a 2D grid of modeled line values - the amplitude, e.g. excitation temperature,
and the optical depth, tau - return the model spectrum
griddims contains the names of the axes and their values... it should have the same
number of entries as gridpars
"""
if not scipyOK:
raise ImportError("Scipy could not be imported, therefore interpolation is not available.")
#matchpt1 = np.argmin( np.abs( gridval1 - griddim1[0,:] ))
#matchpt2 = np.argmin( np.abs( gridval2 - griddim2[:,0] ))
return scipy.ndimage.map_coordinates(valuegrid,np.array([[gridval2],[gridval1]]),order=1)
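# NOTE: map_coordinates above already does linear interpolation at the
# fractional grid position (order=1), so the scipy.interpolate code below is
# unreachable (left over from an earlier approach).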
interpgrid = scipy.interpolate.interp2d(
griddim1[ gridval1-5:gridval1+5, gridval2-5:gridval2+5].ravel(),
griddim2[ gridval1-5:gridval1+5, gridval2-5:gridval2+5].ravel(),
valuegrid[gridval1-5:gridval1+5, gridval2-5:gridval2+5].ravel())
return interpgrid(gridval1,gridval2)
def line_model_2par(xax, center, width, gridval1, gridval2, griddim1, griddim2, maxampgrid, taugrid,
linefunction=gaussian_line):
"""
Returns the spectral line that matches the given x-axis
xax, center, width must be in the same units!
"""
maxamp = line_params_2D(gridval1, gridval2, griddim1, griddim2, maxampgrid)
tau = line_params_2D(gridval1, gridval2, griddim1, griddim2, taugrid)
return linefunction(xax, maxamp, tau, center, width)
| mit |
AnasGhrab/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 |
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
hlin117/scikit-learn | examples/missing_values.py | 71 | 3055 |
"""
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
AlexanderFabisch/scikit-learn | sklearn/ensemble/voting_classifier.py | 9 | 8052 |
"""
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
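# Weighted plurality vote: np.bincount tallies each predicted class index
# per sample (optionally weighted by self.weights); argmax picks the winner.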
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions.astype('int'))
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilties calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
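# --- Editor's note: a hedged usage sketch, not part of the original module.
# Because get_params() exposes the wrapped estimators under the
# '<name>__<param>' convention, the whole ensemble can be tuned with grid
# search. The grid below is purely illustrative, reusing clf1, clf2, X and y
# from the class docstring example above.
#
#   >>> from sklearn.grid_search import GridSearchCV  # sklearn.model_selection in later releases
#   >>> eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
#   ...                         voting='soft')
#   >>> params = {'lr__C': [1.0, 100.0], 'rf__n_estimators': [20, 200]}
#   >>> grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
#   >>> grid = grid.fit(X, y)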
|
bsd-3-clause
|
adamrvfisher/TechnicalAnalysisLibrary
|
MovingAverageRemoteSignalOptimizerTwoAssetSuper.py
|
1
|
9139
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 19:07:37 2017
@author: AmatVictoriaCuramIII
"""
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
Empty = []
Dataset = pd.DataFrame()
Dataset2 = pd.DataFrame()
Portfolio = pd.DataFrame()
Start = t.time()
Counter = 0
#Input
Ticker1 = 'UVXY'
#Ticker2 = '^VIX'
#Remote Signal
Ticker3 = '^VIX'
#Here we go
Asset1 = YahooGrabber(Ticker1)
#Asset2 = YahooGrabber(Ticker2)
Asset2 = pd.read_csv('C:\\Users\\AmatVictoriaCuramIII\\Desktop\\Python\\VX1CC.csv', sep = ',')
Asset2.Date = pd.to_datetime(Asset2.Date, format = "%m/%d/%Y")
Asset2 = Asset2.set_index('Date')
Asset2 = Asset2.reindex(index=Asset2.index[::-1])
Asset1 = Asset1[:-6]
#Remote Signal
Asset3 = Asset2
#Asset3 = YahooGrabber(Ticker3)
#Match lengths
#Trimmer
trim = abs(len(Asset1) - len(Asset2))
if len(Asset1) == len(Asset2):
pass
else:
if len(Asset1) > len(Asset2):
Asset1 = Asset1[trim:]
else:
Asset2 = Asset2[trim:]
Asset3 = Asset3[-len(Asset2):]
#Asset2 = Asset2[-600:]
#Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset2['LogRet'] = np.log(Asset2['Adj Close']/Asset2['Adj Close'].shift(1))
Asset2['LogRet'] = Asset2['LogRet'].fillna(0)
#Prepare the remote controller
Asset3['LogRet'] = np.log(Asset3['Adj Close']/Asset3['Adj Close'].shift(1))
Asset3['LogRet'] = Asset3['LogRet'].fillna(0)
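#Editor's sketch (hedged; defined here but never called by the original script):
#the regime switch repeated in the loops below can be expressed as one helper.
#Parameter names are illustrative assumptions.
def ma_regime_position(signal_close, signal_ma, above_ma_weight, below_ma_weight):
    #If yesterday's signal close sits above yesterday's moving average, hold
    #above_ma_weight, otherwise below_ma_weight (same rule as the np.where calls below).
    return np.where(signal_close.shift(1) > signal_ma.shift(1),
                    above_ma_weight, below_ma_weight)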
#Primary Brute Force Optimization
iterations = range(0, 7500)
for i in iterations:
Counter = Counter + 1
a = rand.random()
b = 1 - a
c = 0#rand.random()
d = 0#rand.random()
if c + d > 1:
continue
e = rand.randint(3,30)
window = int(e)
Asset3['MA'] = Asset3['Close'].rolling(window=window, center=False).mean()
Asset3['MA'] = Asset3['MA'].fillna(0)
Asset1['Position'] = a
Asset1['Position'] = np.where(Asset3['Close'].shift(1) > Asset3['MA'].shift(1),
c,a)
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset2['Position'] = b
Asset2['Position'] = np.where(Asset3['Close'].shift(1) > Asset3['MA'].shift(1),
d,b)
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = (Asset1['Pass']) * (-1) #Pass a short position
Portfolio['Asset2Pass'] = (Asset2['Pass']) #* (-1)
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
if Portfolio['LongShort'].std() == 0:
continue
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
if MaxDD > float(.2):
continue
#
dailyreturn = Portfolio['LongShort'].mean()
if dailyreturn < .003:
continue
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
print(Counter)
Empty.append(a)
Empty.append(b)
Empty.append(c)
Empty.append(d)
Empty.append(e)
Empty.append(sharpe)
Empty.append(sharpe/MaxDD)
Empty.append(dailyreturn/MaxDD)
Empty.append(MaxDD)
Emptyseries = pd.Series(Empty)
Dataset[0] = Emptyseries.values
Dataset[i] = Emptyseries.values
Empty[:] = []
#primary optimization output sorting
z1 = Dataset.iloc[6]
w1 = np.percentile(z1, 80)
v1 = [] #this variable stores the Nth percentile of top performers
DS1W = pd.DataFrame() #this variable stores your financial advisors for specific dataset
for h in z1:
if h > w1:
v1.append(h)
for j in v1:
r = Dataset.columns[(Dataset == j).iloc[6]]
DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[6]] #this is the column number
kfloat = float(k[0])
End = t.time()
#Secondary optimization. Wow. Much involved.
for i in iterations:
Counter = Counter + 1
f = rand.randint(31,252)
g = rand.random() - .3
if g < .1:
continue
window2 = int(f)
Asset3['MA2'] = Asset3['Adj Close'].rolling(window=window2, center=False).mean()
Asset3['MA2'] = Asset3['MA2'].fillna(0)
Asset3['LongVIX'] = np.where(Portfolio['LongShort'] == 0, 1, 0)
Asset3['VIX<MA2'] = np.where(Asset3['Adj Close'] > Asset3['MA2'], 1, 0)
Asset3['VolRegime'] = Asset3['LongVIX'] - Asset3['VIX<MA2']
Asset3['VolRegime'] = np.where(Asset3['VolRegime'] < 0, 0, Asset3['VolRegime'])
Asset3['SignalReturns'] = np.where(Asset3['VolRegime'] == 1, Asset3['LogRet'], 0)
#Asset3['SignalReturns'].cumsum().apply(np.exp).plot()
Asset3['Super'] = (Asset3['SignalReturns'] * g ) + Portfolio['LongShort']
Asset3['SuperMultiplier'] = Asset3['Super'].cumsum().apply(np.exp)
SuperDrawdown = 1 - Asset3['SuperMultiplier'].div(Asset3['SuperMultiplier'].cummax())
SuperDrawdown = SuperDrawdown.fillna(0)
SuperMaxDD = max(SuperDrawdown)
superdailyreturn = Asset3['Super'].mean()
# if dailyreturn > superdailyreturn:
# continue
superdailyvol = Asset3['Super'].std()
supersharpe =(superdailyreturn/superdailyvol)
print(Counter)
Empty.append(f)
Empty.append(g)
Empty.append(supersharpe)
Empty.append(superdailyreturn)
Empty.append(supersharpe/SuperMaxDD)
Empty.append(superdailyreturn/SuperMaxDD)
Empty.append(SuperMaxDD)
Empty.append(superdailyreturn)
Emptyseries = pd.Series(Empty)
Dataset2[0] = Emptyseries.values
Dataset2[i] = Emptyseries.values
Empty[:] = []
#secondary optimization output sorting
z2 = Dataset2.iloc[2]
w2 = np.percentile(z2, 80)
v2 = [] #this variable stores the Nth percentile of top performers
DS2W = pd.DataFrame() #this variable stores your financial advisors for specific dataset
for h in z2:
    if h > w2:
v2.append(h)
for j in v2:
r = Dataset2.columns[(Dataset2 == j).iloc[2]]
DS2W = pd.concat([DS2W,Dataset2[r]], axis = 1)
y2 = max(z2)
k2 = Dataset2.columns[(Dataset2 == y2).iloc[2]] #this is the column number
k2float = float(k2[0])
End2 = t.time()
print(End2-Start, 'seconds later')
print('Dataset[k]')
print(Dataset[k])
print('Dataset2[k2]')
print(Dataset2[k2])
window = int((Dataset[kfloat][4]))
window2 = int((Dataset2[k2float][0]))
Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset3['MA'] = Asset3['MA'].fillna(0)
Asset3['MA2'] = Asset3['Adj Close'].rolling(window=window2, center=False).mean()
Asset3['MA2'] = Asset3['MA2'].fillna(0)
Asset1['Position'] = (Dataset[kfloat][0])
Asset1['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
Dataset[kfloat][2],Dataset[kfloat][0])
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset2['Position'] = (Dataset[kfloat][1])
Asset2['Position'] = np.where(Asset3['Adj Close'].shift(1) > Asset3['MA'].shift(1),
Dataset[kfloat][3],Dataset[kfloat][1])
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = Asset1['Pass'] * (-1)
Portfolio['Asset2Pass'] = Asset2['Pass'] #* (-1)
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
Portfolio['LongShort'][:].cumsum().apply(np.exp).plot(grid=True,
figsize=(8,5))
dailyreturn = Portfolio['LongShort'].mean()
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown2 = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
print(max(drawdown2))
Asset3['LongVIX'] = np.where(Portfolio['LongShort'] == 0, 1, 0)
Asset3['VIX<MA2'] = np.where(Asset3['Adj Close'] < Asset3['MA2'], 1, 0)
Asset3['VolRegime'] = Asset3['LongVIX'] - Asset3['VIX<MA2']
Asset3['VolRegime'] = np.where(Asset3['VolRegime'] < 0, 0, Asset3['VolRegime'])
Asset3['SignalReturns'] = np.where(Asset3['VolRegime'] == 1, Asset3['LogRet'], 0)
#Asset3['SignalReturns'].cumsum().apply(np.exp).plot()
SuperFactor = Dataset2[k2float][1]
Asset3['Super'] = (Asset3['SignalReturns'] * SuperFactor) + Portfolio['LongShort']
Asset3['SuperMultiplier'] = Asset3['Super'].cumsum().apply(np.exp)
SuperDrawdown = 1 - Asset3['SuperMultiplier'].div(Asset3['SuperMultiplier'].cummax())
SuperDrawdown = SuperDrawdown.fillna(0)
SuperMaxDD = max(SuperDrawdown)
superdailyreturn = Asset3['Super'].mean()
superdailyvol = Asset3['Super'].std()
supersharpe =(superdailyreturn/superdailyvol)
print(SuperMaxDD)
Asset3['SuperMultiplier'][:].plot()
Portfolio['LongShort'][:].cumsum().apply(np.exp).plot(grid=True,
figsize=(8,5))
#pd.to_pickle(Portfolio, 'VXX:UVXY')
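#Editor's note (hedged): the Sharpe figures above are computed from per-bar
#(daily) mean and standard deviation; a common annualisation, assuming roughly
#252 trading days, would be sharpe * np.sqrt(252).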
|
apache-2.0
|
murali-munna/scikit-learn
|
examples/neighbors/plot_kde_1d.py
|
347
|
5100
|
"""
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
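# Editor's note -- a hedged sketch, not part of the original example: the
# bandwidths above are fixed by hand (0.75 and 0.5). One common alternative is
# to pick the bandwidth by cross-validated grid search over the KDE
# log-likelihood, e.g.:
#
#   from sklearn.grid_search import GridSearchCV  # sklearn.model_selection in later releases
#   grid = GridSearchCV(KernelDensity(kernel='gaussian'),
#                       {'bandwidth': np.linspace(0.1, 1.0, 30)}, cv=20)
#   grid.fit(X)
#   print(grid.best_params_)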
|
bsd-3-clause
|
prikhodkop/ECG_project
|
scripts/filters/data_filtering.py
|
1
|
3604
|
from collections import OrderedDict
import numpy as np
import pandas as pd
import logging
def get_default_RR_filtering_params():
"""
Returns OrderedDict of filters parameters. The order is important.
Value 'None' means no actions.
"""
filtering_params = OrderedDict(( ('interval type', 'N'), # e.g. None
('interval range', [200.0, 2000.0]), # min_interval, max_interval, e.g. [None, 2000.0]
('successive intervals ration range', None)
))
return filtering_params
def filter_data_RR(data_RR, RR_filtering_params):
"""
Filter pulse intervals data.
Args:
data_RR (np.array): data in format (time since midnight [ms], beat-beat interval [ms], interval type)
RR_filtering_params (OrderedDict): see example in get_default_RR_filtering_params()
Returns:
filtered_data_RR (np.array or None): data in format (time since midnight [ms], interval [ms]), or
None if no data are available after filtering
filtration_info (dict): initial and remaining intervals number
"""
filtration_info = {'initial size': len(data_RR)}
for filter_name in RR_filtering_params:
if RR_filtering_params[filter_name] is not None:
if filter_name == 'interval type':
permitted_beat_type = RR_filtering_params[filter_name]
data_RR = data_RR[data_RR[:, -1] == permitted_beat_type]
elif filter_name == 'interval range':
min_interval, max_interval = RR_filtering_params[filter_name]
if min_interval is not None:
data_RR = data_RR[data_RR[:, 1] >= min_interval]
if max_interval is not None:
data_RR = data_RR[data_RR[:, 1] <= max_interval]
elif filter_name == 'successive intervals ration range':
min_ratio, max_ratio = RR_filtering_params[filter_name]
#TODO
if min_ratio is not None or max_ratio is not None:
msg = 'Filtration based on successive intervals ration range is not implemented.'
logging.critical(msg)
raise Exception(msg)
else:
msg = 'An unknown filter name: %s'%filter_name
logging.critical(msg)
raise Exception(msg)
if data_RR.size == 0:
filtration_info['final size'] = 0
return None, filtration_info
filtered_data_RR = data_RR[:, :-1] # exclude interval type
filtration_info['final size'] = len(filtered_data_RR)
return filtered_data_RR, filtration_info
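# Editor's note -- a hedged sketch only, not wired into filter_data_RR above:
# the 'successive intervals ration range' filter that the original leaves as a
# TODO could, for instance, compare each interval with its predecessor and keep
# rows whose ratio lies inside [min_ratio, max_ratio]:
#
#   intervals = data_RR[:, 1].astype(float)
#   ratio = intervals[1:] / intervals[:-1]
#   keep = np.ones(len(data_RR), dtype=bool)
#   keep[1:] = (ratio >= min_ratio) & (ratio <= max_ratio)
#   data_RR = data_RR[keep]
#
# The original code intentionally raises instead, so this stays as a comment.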
if __name__ == '__main__':
# 'Simulate' data_RR format
data_RR = pd.DataFrame([ [43748861, 11, 'N'],
[43749368, 507, 'N'],
[43749879, 111, 'A'],
[122322922, 3523, 'N'],
[122323448, 526, 'N'],
[122323983, 535, 'A']
]).as_matrix()
print 'Initial:\n', data_RR
def_params = get_default_RR_filtering_params()
print '\ndef_params', def_params
data_RR_filtered, filter_info = filter_data_RR(data_RR, def_params)
print '\nFiltered for default configuration:\n', data_RR_filtered
params_empty = OrderedDict(( ('interval type', None),
('interval range', [4000, None]),
('successive intervals ration range', [None, None])
))
data_RR_filtered, filter_info = filter_data_RR(data_RR, params_empty)
print '\nFiltered for empty configuration:\n', data_RR_filtered
|
gpl-2.0
|
equialgo/scikit-learn
|
sklearn/metrics/cluster/supervised.py
|
25
|
31477
|
"""Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# Gregory Stupp <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
from math import log
import numpy as np
from scipy.misc import comb
from scipy import sparse as sp
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
from ...utils.validation import check_array
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays."""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, sparse=False):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps : None or float, optional.
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
sparse : boolean, optional.
        If True, return a sparse CSR contingency matrix. If ``eps is not None``,
and ``sparse is True``, will throw ValueError.
.. versionadded:: 0.18
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
"""
if eps is not None and sparse:
raise ValueError("Cannot set 'eps' when sparse=True")
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
if sparse:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
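# Editor's note -- an illustrative, hedged usage sketch for contingency_matrix,
# kept as a comment so module import behaviour is unchanged. Row i / column j
# counts samples with true class i and predicted cluster j:
#
#   >>> contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#   array([[1, 1],
#          [0, 2]])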
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1, even when the labels are permuted::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
n_classes = np.unique(labels_true).shape[0]
n_clusters = np.unique(labels_pred).shape[0]
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (n_classes == n_clusters == 1 or
n_classes == n_clusters == 0 or
n_classes == n_clusters == n_samples):
return 1.0
# Compute the ARI using the contingency data
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
sum_comb_c = sum(comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
sum_comb_k = sum(comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.data)
prod_comb = (sum_comb_c * sum_comb_k) / comb(n_samples, 2)
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return (sum_comb - prod_comb) / (mean_comb - prod_comb)
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of the ground
    truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure : float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
MI = mutual_info_score(None, None, contingency=contingency)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness /
(homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency : {None, array, sparse matrix},
shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
else:
contingency = check_array(contingency,
accept_sparse=['csr', 'csc', 'coo'],
dtype=[int, np.int32, np.int64])
if isinstance(contingency, np.ndarray):
# For an array
nzx, nzy = np.nonzero(contingency)
nz_val = contingency[nzx, nzy]
elif sp.issparse(contingency):
# For a sparse matrix
nzx, nzy, nz_val = sp.find(contingency)
else:
raise ValueError("Unsupported type for 'contingency': %s" %
type(contingency))
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = pi.take(nzx) * pj.take(nzy)
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
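# Editor's note -- a hedged doctest-style sketch (not in the original source):
# two identical two-cluster labelings of four samples share ln(2) nats of
# information, so
#
#   >>> mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])  # doctest: +ELLIPSIS
#   0.69...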
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper-bounded by 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings) have
       an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings.
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def fowlkes_mallows_score(labels_true, labels_pred, sparse=False):
"""Measure the similarity of two clusterings of a set of points.
    The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
the precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
    Where ``TP`` is the number of **True Positives** (i.e. the number of pairs
    of points that belong to the same clusters in both ``labels_true`` and
    ``labels_pred``), ``FP`` is the number of **False Positives** (i.e. the
    number of pairs of points that belong to the same clusters in
    ``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
    **False Negatives** (i.e. the number of pairs of points that belong to the
    same clusters in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between two clusters.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
    .. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples, = labels_true.shape
c = contingency_matrix(labels_true, labels_pred, sparse=True)
tk = np.dot(c.data, c.data) - n_samples
pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
return tk / np.sqrt(pk * qk) if tk != 0. else 0.
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float64)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
|
bsd-3-clause
|
kd0aij/RFExplorer-for-Python
|
implot.py
|
1
|
4909
|
#pylint: disable=trailing-whitespace, line-too-long, bad-whitespace, invalid-name, R0204, C0200
#pylint: disable=superfluous-parens, missing-docstring, broad-except
#pylint: disable=too-many-lines, too-many-instance-attributes, too-many-statements, too-many-nested-blocks
#pylint: disable=too-many-branches, too-many-public-methods, too-many-locals, too-many-arguments
#============================================================================
# plot CSV file generated by rfe.py
#============================================================================
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import os
fignum = 0
# find CSV files with names in the form YYYY-MM-DD_HH:MM:SS
# signal strength measurements are appended with ".csv"
# and cpu temp files are appended with "_T.csv"
files = os.listdir('.')
sigfiles = []
tfiles = []
for fname in sorted(files):
print(fname)
index = fname.find('.csv')
if (index == 19):
sigfiles.append(fname)
elif (index == 21):
tfiles.append(fname)
# print the list and have user select the plot range
index = 0
for fname in sigfiles:
print('{0:02d}: {1:s}'.format(index, fname.split('.')[0]))
index += 1
# concatenate all cputemp files into strtemp
# assuming the local directory contains all the csv files for a single day,
# skip the last file since it probably spans midnight
strtemp = ''
for index in range(len(tfiles)-1):
#print(tfiles[index])
infile = open(tfiles[index], 'r')
strtemp += infile.read()
# plot cpu temps
templines = strtemp.split('\n')
ntemps = len(templines)-1
tempc = np.zeros(ntemps)
# construct x axis
starthms = templines[0].split(',')[0].split(' ')[1].split(':')
endhms = templines[ntemps-1].split(',')[0].split(' ')[1].split(':')
startsec = int(starthms[0])*3600 + int(starthms[1])*60 + int(starthms[2])
endsec = int(endhms[0])*3600 + int(endhms[1])*60 + int(endhms[2])
xvals = np.linspace(startsec/3600, endsec/3600, ntemps)
for rec in range(ntemps):
tempc[rec] = float(templines[rec].split(',')[1])
fig, ax = plt.subplots(num=fignum, figsize=(8,4))
ax.plot(xvals,tempc)
plt.xlabel('hours')
plt.ylabel('degC')
plt.title('CPU temp {0:s}'.format(templines[0].split(',')[0]))
plt.savefig("cpuTemp.png")
plt.show(block=False)
# while (True):
# instr = input("starting, ending index, (return to quit): ").split(',')
# if (instr[0] == ''): break
# istart = instr[0]
# iend = instr[1]
nsig = len(sigfiles)
interval = 6
for istart in range(0,nsig,interval):
iend = istart + interval
if (iend > nsig): iend = nsig
# concatenate selected signal files into strsig
strsig = ''
for index in range(istart, iend):
print(sigfiles[index])
infile = open(sigfiles[index], 'r')
strsig += infile.read()
# get number of freq bins from first record
strlines = strsig.split('\n')
fields = strlines[0].split(',')
nfreq = len(fields) - 12
print('{0:d} frequency bins'.format(nfreq))
# the split results in an empty line at the end of strlines
nrecs = len(strlines) - 1
print('{0:d} scans'.format(nrecs))
# get start/end date, time and freq from first and last records
data = np.zeros(nfreq)
scanTime = fields[0].split('.')[0]
datetime = fields[0].split(' ')
startdate = datetime[0]
starttime = datetime[1].split('.')[0]
startFreq = float(fields[2])
endFreq = float(fields[5])
fields = strlines[nrecs-1].split(',')
datetime = fields[0].split(' ')
enddate = datetime[0]
endtime = datetime[1].split('.')[0]
# construct image
plotdata = np.full((nrecs,nfreq), -120)
peakAmp = -120
peakBin = 0
peakRec = 0
for rec in range(nrecs):
fields = strlines[rec].split(',')
nfreq = len(fields) - 12
for bin in range(nfreq):
amp = float(fields[12+bin])
plotdata[rec,bin] = amp
if (peakAmp < amp):
peakAmp = amp
peakBin = bin
peakRec = rec
# create plot
deltaFreq = (endFreq - startFreq) / nfreq
fignum += 1
fig, ax = plt.subplots(num=fignum, figsize=(8,8))
plt.imshow(plotdata,interpolation='nearest',cmap="hot")
plt.xlabel('MHz')
plt.ylabel('minutes')
locs,labels = plt.xticks()
labels = ['{0:.1f}'.format(startFreq+locs[iTick]*deltaFreq) for iTick in range(len(labels))]
ax.set_xticklabels(labels)
tickIntvl = 6
locs = range(0,nrecs,tickIntvl)
ax.set_yticks(locs)
labels = ['{0:d}'.format(iTick) for iTick in range(len(locs))]
ax.set_yticklabels(labels)
peakFreq = startFreq + (peakBin * deltaFreq)
plt.title('{2:s}\npeak amp: {0:.1f}, freq: {1:.1f}, time: {3:.1f}'.format(peakAmp, peakFreq, str(scanTime).split('.')[0],peakRec/6.0))
plt.savefig("RFimage_{0:s}.png".format(str(scanTime).split(' ')[1].split('.')[0]))
plt.close(fignum)
#plt.show(block=False)
|
lgpl-3.0
|
Raag079/self-driving-car
|
Term01-Computer-Vision-and-Deep-Learning/Labs/05-CarND-Alexnet-Feature-Extraction/feature_extraction.py
|
2
|
1499
|
import time
import tensorflow as tf
import numpy as np
import pandas as pd
from scipy.misc import imread
from alexnet import AlexNet
sign_names = pd.read_csv('signnames.csv')
nb_classes = 43
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
resized = tf.image.resize_images(x, (227, 227))
# NOTE: By setting `feature_extract` to `True` we return
# the second to last layer.
fc7 = AlexNet(resized, feature_extract=True)
# TODO: Define a new fully connected layer followed by a softmax activation to classify
# the traffic signs. Assign the result of the softmax activation to `probs` below.
shape = (fc7.get_shape().as_list()[-1], nb_classes) # use this shape for the weight matrix
fc8W = tf.Variable(tf.truncated_normal(shape, stddev=1e-2))
fc8b = tf.Variable(tf.zeros(nb_classes))
logits = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
probs = tf.nn.softmax(logits)
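# Editor's comment (assumption: the standard AlexNet, whose fc7 layer is 4096-wide):
# fc8W then has shape (4096, 43) and probs has shape (batch_size, 43), i.e. one
# probability per traffic-sign class.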
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# Read Images
im1 = imread("construction.jpg").astype(np.float32)
im1 = im1 - np.mean(im1)
im2 = imread("stop.jpg").astype(np.float32)
im2 = im2 - np.mean(im2)
# Run Inference
t = time.time()
output = sess.run(probs, feed_dict={x: [im1, im2]})
# Print Output
for input_im_ind in range(output.shape[0]):
inds = np.argsort(output)[input_im_ind, :]
print("Image", input_im_ind)
for i in range(5):
print("%s: %.3f" % (sign_names.ix[inds[-1 - i]][1], output[input_im_ind, inds[-1 - i]]))
print()
print("Time: %.3f seconds" % (time.time() - t))
|
mit
|
Vimos/scikit-learn
|
examples/cluster/plot_kmeans_digits.py
|
42
|
4491
|
"""
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(82 * '_')
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(82 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each mesh point.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
alan-unravel/bokeh
|
examples/compat/seaborn/violin.py
|
34
|
1153
|
import seaborn as sns
from bokeh import mpl
from bokeh.plotting import output_file, show
tips = sns.load_dataset("tips")
sns.set_style("whitegrid")
# ax = sns.violinplot(x="size", y="tip", data=tips.sort("size"))
# ax = sns.violinplot(x="size", y="tip", data=tips,
# order=np.arange(1, 7), palette="Blues_d")
# ax = sns.violinplot(x="day", y="total_bill", hue="sex",
# data=tips, palette="Set2", split=True,
# scale="count")
ax = sns.violinplot(x="day", y="total_bill", hue="sex",
data=tips, palette="Set2", split=True,
scale="count", inner="stick")
# ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
# data=tips, palette="muted", split=True)
# ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
# data=tips, palette="muted")
# planets = sns.load_dataset("planets")
# ax = sns.violinplot(x="orbital_period", y="method",
# data=planets[planets.orbital_period < 1000],
# scale="width", palette="Set3")
output_file("violin.html")
show(mpl.to_bokeh())
|
bsd-3-clause
|
evidation-health/bokeh
|
bokeh/_legacy_charts/builder/histogram_builder.py
|
43
|
9142
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Histogram class, which lets you build your histograms just by
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
try:
import scipy.special
_is_scipy = True
except ImportError as e:
_is_scipy = False
import numpy as np
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import Line, Quad
from ...properties import Bool, Float, Int
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Histogram(values, bins, mu=None, sigma=None, density=True, **kws):
""" Create a histogram chart using :class:`HistogramBuilder <bokeh.charts.builder.histogram_builder.HistogramBuilder>`
to render the geometry from values, bins, sigma and density.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
bins (int): number of bins to use in the Histogram building.
mu (float, optional): theoretical mean value for the normal
distribution. (default: None)
sigma (float, optional): theoretical sigma value for the
normal distribution. (default: None)
density (bool, optional): If False, the result will contain
the number of samples in each bin. If True, the result
is the value of the probability *density* function at
the bin, normalized such that the *integral* over the
range is 1. For more info check numpy.histogram
function documentation. (default: True)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import pandas as pd
from bokeh.charts import Histogram, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = pd.DataFrame(dict(normal=[1, 2, 3, 1], lognormal=[5, 4, 4, 1]))
hm = Histogram(xyvalues, bins=5, title='Histogram')
output_file('histogram.html')
show(hm)
"""
return create_and_build(
HistogramBuilder, values, bins=bins, mu=mu, sigma=sigma, density=density,
**kws
)
class HistogramBuilder(Builder):
"""This is the Histogram class and it is in charge of plotting
histograms in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (quads and lines) taking the
references from the source.
"""
bins = Int(10, help="""
Number of bins to use for the histogram. (default: 10)
""")
mu = Float(help="""
Theoretical mean value for the normal distribution. (default: None)
""")
sigma = Float(help="""
Theoretical standard deviation value for the normal distribution.
(default: None)
""")
density = Bool(True, help="""
Whether to normalize the histogram. (default: True)
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
""")
def _process_data(self):
"""Take the Histogram data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the quad and line glyphs inside the ``_yield_renderers`` method.
"""
        # list to save all the groups available in the incoming input
self._groups.extend(self._values.keys())
# fill the data dictionary with the proper values
for i, (val, values) in enumerate(self._values.items()):
self.set_and_get("", val, values)
#build the histogram using the set bins number
hist, edges = np.histogram(
np.array(values), density=self.density, bins=self.bins
)
self.set_and_get("hist", val, hist)
self.set_and_get("edges", val, edges)
self.set_and_get("left", val, edges[:-1])
self.set_and_get("right", val, edges[1:])
self.set_and_get("bottom", val, np.zeros(len(hist)))
self._mu_and_sigma = False
if self.mu is not None and self.sigma is not None:
if _is_scipy:
self._mu_and_sigma = True
self.set_and_get("x", val, np.linspace(-2, 2, len(self._data[val])))
den = 2 * self.sigma ** 2
x_val = self._data["x" + val]
x_val_mu = x_val - self.mu
sigsqr2pi = self.sigma * np.sqrt(2 * np.pi)
pdf = 1 / (sigsqr2pi) * np.exp(-x_val_mu ** 2 / den)
self.set_and_get("pdf", val, pdf)
self._groups.append("pdf")
cdf = (1 + scipy.special.erf(x_val_mu / np.sqrt(den))) / 2
self.set_and_get("cdf", val, cdf)
self._groups.append("cdf")
else:
print("You need scipy to get the theoretical probability distributions.")
def _set_sources(self):
"""Push the Histogram data into the ColumnDataSource and calculate
the proper ranges."""
self._source = ColumnDataSource(data=self._data)
if not self._mu_and_sigma:
x_names, y_names = self._attr[2::6], self._attr[1::6]
else:
x_names, y_names = self._attr[2::9], self._attr[1::9]
endx = max(max(self._data[i]) for i in x_names)
startx = min(min(self._data[i]) for i in x_names)
self.x_range = Range1d(start=startx - 0.1 * (endx - startx),
end=endx + 0.1 * (endx - startx))
endy = max(max(self._data[i]) for i in y_names)
self.y_range = Range1d(start=0, end=1.1 * endy)
def _yield_renderers(self):
"""Use the several glyphs to display the Histogram and pdf/cdf.
It uses the quad (and line) glyphs to display the Histogram
bars, taking as reference points the data loaded at the
ColumnDataSurce.
"""
if not self._mu_and_sigma:
sextets = list(chunk(self._attr, 6))
colors = cycle_colors(sextets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# sextet: values, his, edges, left, right, bottom
for i, sextet in enumerate(sextets):
glyph = Quad(
top=sextet[1], bottom=sextet[5], left=sextet[3], right=sextet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
else:
nonets = list(chunk(self._attr, 9))
colors = cycle_colors(nonets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# nonet: values, his, edges, left, right, bottom, x, pdf, cdf
for i, nonet in enumerate(nonets):
glyph = Quad(
top=nonet[1], bottom=nonet[5], left=nonet[3], right=nonet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
glyph = Line(x=nonet[6], y=nonet[7], line_color="black")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
glyph = Line(x=nonet[6], y=nonet[8], line_color="blue")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
|
bsd-3-clause
|
sgrieve/LH_Paper_Plotting
|
Analysis_Code/LH_Relief_Fitting.py
|
1
|
7738
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015 Stuart W.D Grieve 2015
Developer can be contacted by s.grieve _at_ ed.ac.uk
This program is free software;
you can redistribute it and/or modify it under the terms of the
GNU General Public License as published by the Free Software Foundation;
either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY;
without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the
GNU General Public License along with this program;
if not, write to:
Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301
USA
Script to perform fitting of LH and Relief data from *_HilltopData.csv generated
by LH_Driver.cpp
Plots the data and calculates the best fit S_c value given the erosion rate,
diffusivity and material densities.
Parameters and paths to be modified are highlighted by comments.
@author: SWDG
"""
def mm_to_inch(mm):
return mm*0.0393700787
def LH_Rel(k,Sc):
"""
    This is a bit messy. To use the scipy optimizer we need to declare a function whose independent
    variables (LH, diffusivity, erosion rate, pr, ps) are passed as a tuple, followed by the parameter
    to be optimised, Sc, as the second input argument.
    We have the values of all of the independent variables, so we create a series of numpy arrays
    of the same dimensions as LH, filled with the published parameter values.
    This function implements equation 10 from Grieve et al. 2015, based on work in Roering 2007.
"""
x = k[0]
DD = k[1]
EE = k[2]
pr = k[3]
ps = k[4]
A = (2.*EE*pr)/(DD*Sc*ps)
return (Sc * (-1. + np.sqrt(1 + A**2. * x**2.) + np.log(3.) - np.log(2. + np.sqrt(1. + A**2. * x**2.))))/A
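# ---------------------------------------------------------------------------
# Editor's note: a hedged, minimal sketch (not part of the original analysis)
# of the curve_fit pattern described in the docstring above: the fixed
# independent variables are packed into a tuple and only Sc is optimised.
# All names and numbers below are illustrative; the function is never called.
def _example_curve_fit_pattern():
    import numpy as np
    import scipy.optimize as optimization

    def model(indep, Sc):
        x, DD, EE, pr, ps = indep          # unpack the fixed independent variables
        A = (2. * EE * pr) / (DD * Sc * ps)
        return (Sc * (-1. + np.sqrt(1. + A**2. * x**2.) + np.log(3.)
                      - np.log(2. + np.sqrt(1. + A**2. * x**2.)))) / A

    x = np.linspace(1., 400., 50)                  # synthetic hillslope lengths
    fixed = (x,
             np.ones_like(x) * 0.0086,             # diffusivity
             np.ones_like(x) * 0.25,               # erosion rate
             np.ones_like(x) * 2400.,              # rock density
             np.ones_like(x) * 1400.)              # soil density
    y = model(fixed, 0.8)                          # synthetic relief from a known Sc
    params, _ = optimization.curve_fit(model, fixed, y, p0=0.5)
    return params[0]                               # should recover roughly 0.8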
def r_squared(modeled, measured):
mean_measured = np.mean(measured)
sqr_err_w_line=[]
sqr_err_mean = []
for measure,mod in zip(measured,modeled):
sqr_err_w_line.append((mod-measure)**2)
sqr_err_mean.append((measure-mean_measured)**2)
r_sq = 1-(np.sum(sqr_err_w_line)/np.sum(sqr_err_mean))
return r_sq
def fill_array(array, value):
"""
    Quick wrapper to replace the np.full_like function, which is not present in numpy 1.6
"""
New_Array = array.copy()
New_Array.fill(value)
return New_Array
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
import scipy.optimize as optimization
# Set up fonts for plots
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['arial']
rcParams['font.size'] = 10
rcParams['xtick.direction'] = 'out'
rcParams['ytick.direction'] = 'out'
#================ modifyable parameters start here ====================
path = 'C:/Users/Stuart/Desktop/FR/' #path to the folder contaning the hilltopdata files
Filename = 'CR2_HilltopData.csv'
figpath = path #path to save the final figures
#soil and rock density data
#roering 2007 has a ratio of 1.5 to 2 but does not double rho_s
#hillel 1980 fundamentals of soil physics says soil ranges between 1.1 and 1.6
pr = 2.4 *1000 #kg/m^3 (2.4 is in g/cm^3)
ps = 1.4 *1000
#diffusivity and erosion rate data published for the sites
DD = 0.0086
EE = 0.25
#plot style parameters
xmax = 400
ymax = 300
xstep = 100
ystep = 100
#plot labels
location = 'Sierra Nevada'
#initial Sc guess - we need to seed the fitting with a sane value
#optimizer has been tested and is NOT sensitive to this param. Just choose something vaguely sane
init_Sc = 0.8
#================ modifyable parameters end here ====================
fig = plt.figure()
#load the lh and relief data from the hilltopdata file
with open(path + Filename,'r') as f:
f.readline()
data = f.readlines()
LH_Data = []
R_Data = []
#get the data and remove any values below 2 as these are probably artifacts
for d in data:
    if 'fail' not in d and len(d.split(','))>10: #len is used to skip incomplete data when processing plots on files that have not finished running
split = d.split(',')
relief = float(split[4])
lh = float(split[5])
slope = float(split[8])
EucDist = float(split[13])
if (lh > 2.0):
if (relief > 2.0):
if (slope < 1.2):
if (EucDist/lh > 0.9999 and EucDist/lh < 1.0001):
LH_Data.append(lh)
R_Data.append(relief)
#convert the lists into arrays
LH_Data = np.array(LH_Data[:])
R_Data = np.array(R_Data[:])
#create the subplot and put the location name at the top
ax = plt.gca()
ax.text(.5,.9,location, horizontalalignment='center', transform=ax.transAxes, fontsize=16)
#create the parameter arrays
EE_array = fill_array(LH_Data,EE)
DD_array = fill_array(LH_Data,DD)
Pr_array = fill_array(LH_Data,pr)
Ps_array = fill_array(LH_Data,ps)
#just want the params from the fit, don't need the covariance matrix, _
params, _ = optimization.curve_fit(LH_Rel, (LH_Data,DD_array,EE_array,Pr_array,Ps_array), R_Data, init_Sc)
#get the optimized Sc
Sc = params[0]
#generate the best fit line using the S_c value
modeled_y = []
for data in LH_Data:
modeled_y.append(LH_Rel((data,DD_array[0],EE_array[0],Pr_array[0],Ps_array[0]),params[0]))
#get the r_squared of the fit
r_sq = r_squared(R_Data,modeled_y)
#sort the modeled data by LH, so that it can be plotted as a line
sorted_data = sorted(zip(LH_Data, modeled_y))
LH_Sorted = [x[0] for x in sorted_data]
R_Sorted = [x[1] for x in sorted_data]
#plot the raw data and the line
plt.scatter(LH_Data, R_Data, s=5, marker="o", c='k', edgecolors='none')
plt.plot(LH_Sorted,R_Sorted,'r-')
#annotate the figure with the r squared and critical slope value
plt.annotate('$\mathregular{R^2}$= '+str(round(r_sq,2)), xy=(0.1, 0.8), xycoords='axes fraction', fontsize=12,
horizontalalignment='left', verticalalignment='bottom')
plt.annotate('$\mathregular{S_c}$= '+str(round(Sc,20)), xy=(0.1, 0.73), xycoords='axes fraction', fontsize=12,
horizontalalignment='left', verticalalignment='bottom')
#set the x and y max based on the input params
plt.xlim(0,xmax)
plt.ylim(0,ymax)
#configure tick spacing based on the defined spacings given
ax.xaxis.set_ticks(np.arange(0,xmax+1,xstep))
ax.yaxis.set_ticks(np.arange(0,ymax+1,ystep))
#format the ticks to only appear on the bottom and left axes
plt.tick_params(axis='x', which='both', top='off',length=2)
plt.tick_params(axis='y', which='both', right='off',length=2)
#adjust the spacing between the 4 plots
plt.subplots_adjust(hspace = 0.3)
#add in the x and y labels
fig.text(0.5, 0.02, 'Hillslope length (m)', ha='center', va='center', size=12)
fig.text(0.06, 0.5, 'Relief (m)', ha='center', va='center', rotation='vertical', size=12)
plt.show()
#set the size of the plot to be saved. These are the JGR sizes:
#quarter page = 95*115
#half page = 190*115 (horizontal) 95*230 (vertical)
#full page = 190*230
fig.set_size_inches(mm_to_inch(190), mm_to_inch(115))
plt.savefig(figpath + 'LH_Relief_Fit.png', dpi=500)  # change to *.tif for submission
|
gpl-2.0
|
aavanian/bokeh
|
bokeh/util/tests/test_serialization.py
|
2
|
11583
|
from __future__ import absolute_import
import datetime
import base64
import pytest
import numpy as np
import pandas as pd
import pytz
import bokeh.util.serialization as bus
def test_id():
assert len(bus.make_id()) == 36
assert isinstance(bus.make_id(), str)
def test_id_with_simple_ids():
import os
os.environ["BOKEH_SIMPLE_IDS"] = "yes"
assert bus.make_id() == "1001"
assert bus.make_id() == "1002"
del os.environ["BOKEH_SIMPLE_IDS"]
def test_np_consts():
assert bus.NP_EPOCH == np.datetime64(0, 'ms')
assert bus.NP_MS_DELTA == np.timedelta64(1, 'ms')
def test_binary_array_types():
assert len(bus.BINARY_ARRAY_TYPES) == 8
for typ in [np.dtype(np.float32),
np.dtype(np.float64),
np.dtype(np.uint8),
np.dtype(np.int8),
np.dtype(np.uint16),
np.dtype(np.int16),
np.dtype(np.uint32),
np.dtype(np.int32)]:
assert typ in bus.BINARY_ARRAY_TYPES
def test_datetime_types():
# includes pandas types during tests
assert len(bus.DATETIME_TYPES) == 8
def test_is_datetime_type():
assert bus.is_datetime_type(datetime.datetime(2016, 5, 11))
assert bus.is_datetime_type(datetime.timedelta(3000))
assert bus.is_datetime_type(datetime.date(2016, 5, 11))
assert bus.is_datetime_type(datetime.time(3, 54))
assert bus.is_datetime_type(np.datetime64("2011-05-11"))
assert bus.is_datetime_type(np.timedelta64(3000, 'ms'))
assert bus.is_datetime_type(pd.Timedelta("3000ms"))
assert bus.is_datetime_type(bus._pd_timestamp(3000000))
def test_convert_datetime_type():
assert bus.convert_datetime_type(datetime.datetime(2018, 1, 3, 15, 37, 59, 922452)) == 1514993879922.452
assert bus.convert_datetime_type(datetime.datetime(2018, 1, 3, 15, 37, 59)) == 1514993879000.0
assert bus.convert_datetime_type(datetime.datetime(2016, 5, 11)) == 1462924800000.0
assert bus.convert_datetime_type(datetime.timedelta(3000)) == 259200000000.0
assert bus.convert_datetime_type(datetime.date(2016, 5, 11)) == 1462924800000.0
assert bus.convert_datetime_type(datetime.time(3, 54)) == 14040000.0
assert bus.convert_datetime_type(np.datetime64("2016-05-11")) == 1462924800000.0
assert bus.convert_datetime_type(np.timedelta64(3000, 'ms')) == 3000.0
assert bus.convert_datetime_type(pd.Timedelta("3000ms")) == 3000.0
assert bus.convert_datetime_type(bus._pd_timestamp(3000000)) == 3.0
@pytest.mark.parametrize('obj', [[1,2], (1,2), dict(), set(), 10.2, "foo"])
@pytest.mark.unit
def test_convert_datetime_type_array_ignores_non_array(obj):
assert bus.convert_datetime_array(obj) is obj
def test_convert_datetime_type_array_ignores_non_datetime_array():
a = np.arange(0,10,100)
assert bus.convert_datetime_array(a) is a
def test_convert_datetime_type_array():
a = np.array(['2018-01-03T15:37:59', '2018-01-03T15:37:59.922452', '2016-05-11'], dtype='datetime64')
r = bus.convert_datetime_array(a)
assert r[0] == 1514993879000.0
assert r[1] == 1514993879922.452
assert r[2] == 1462924800000.0
assert r.dtype == 'float64'
def test_convert_datetime_type_with_tz():
# This ensures datetimes are sent to BokehJS timezone-naive
# see https://github.com/bokeh/bokeh/issues/6480
for tz in pytz.all_timezones:
assert bus.convert_datetime_type(datetime.datetime(2016, 5, 11, tzinfo=datetime.tzinfo(tz))) == 1462924800000.0
testing = [[float('nan'), 3], [float('-inf'), [float('inf')]]]
expected = [['NaN', 3.0], ['-Infinity', ['Infinity']]]
def test_traverse_return_valid_json():
assert bus.traverse_data(testing) == expected
def test_traverse_with_numpy():
assert bus.traverse_data(testing, True) == expected
def test_traverse_without_numpy():
assert bus.traverse_data(testing, False) == expected
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.unit
def test_transform_array_force_list_default(dt):
a = np.empty(shape=10, dtype=dt)
out = bus.transform_array(a)
assert isinstance(out, dict)
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.unit
def test_transform_array_force_list_default_with_buffers(dt):
a = np.empty(shape=10, dtype=dt)
bufs = []
out = bus.transform_array(a, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert bufs[0][1] == a.tobytes()
assert 'shape' in out
assert out['shape'] == a.shape
assert 'dtype' in out
assert out['dtype'] == a.dtype.name
assert '__buffer__' in out
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.unit
def test_transform_array_force_list_true(dt):
a = np.empty(shape=10, dtype=dt)
out = bus.transform_array(a, force_list=True)
assert isinstance(out, list)
def test_transform_series_force_list_default():
# default int seems to be int64, can't be encoded!
df = pd.Series([1, 3, 5, 6, 8])
out = bus.transform_series(df)
assert isinstance(out, list)
assert out == [1, 3, 5, 6, 8]
df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
out = bus.transform_series(df)
assert isinstance(out, dict)
df = pd.Series([1.0, 3, 5, 6, 8])
out = bus.transform_series(df)
assert isinstance(out, dict)
df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
out = bus.transform_series(df)
assert isinstance(out, dict)
def test_transform_series_force_list_default_with_buffers():
# default int seems to be int64, can't be converted to buffer!
df = pd.Series([1, 3, 5, 6, 8])
out = bus.transform_series(df)
assert isinstance(out, list)
assert out == [1, 3, 5, 6, 8]
df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
bufs = []
out = bus.transform_series(df, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert bufs[0][1] == np.array(df).tobytes()
assert 'shape' in out
assert out['shape'] == df.shape
assert 'dtype' in out
assert out['dtype'] == df.dtype.name
assert '__buffer__' in out
df = pd.Series([1.0, 3, 5, 6, 8])
bufs = []
out = bus.transform_series(df, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert bufs[0][1] == np.array(df).tobytes()
assert 'shape' in out
assert out['shape'] == df.shape
assert 'dtype' in out
assert out['dtype'] == df.dtype.name
assert '__buffer__' in out
df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
bufs = []
out = bus.transform_series(df, buffers=bufs)
assert isinstance(out, dict)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert bufs[0][1] == np.array(df).tobytes()
assert 'shape' in out
assert out['shape'] == df.shape
assert 'dtype' in out
assert out['dtype'] == df.dtype.name
assert '__buffer__' in out
def test_transform_series_force_list_true():
df = pd.Series([1, 3, 5, 6, 8])
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
df = pd.Series([1, 3, 5, 6, 8], dtype=np.int32)
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
df = pd.Series([1.0, 3, 5, 6, 8])
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
df = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
out = bus.transform_series(df, force_list=True)
assert isinstance(out, list)
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.unit
def test_transform_array_to_list(dt):
a = np.empty(shape=10, dtype=dt)
out = bus.transform_array_to_list(a)
assert isinstance(out, list)
@pytest.mark.parametrize('values', [(['cat', 'dog']), ([1.2, 'apple'])])
@pytest.mark.unit
def test_transform_array_with_nans_to_list(values):
s = pd.Series([np.nan, values[0], values[1]])
out = bus.transform_array_to_list(s)
assert isinstance(out, list)
assert out == ['NaN', values[0], values[1]]
def test_array_encoding_disabled_by_dtype():
assert len(bus.BINARY_ARRAY_TYPES) > 0
dt_ok = bus.BINARY_ARRAY_TYPES
dt_bad = set(np.dtype(x) for x in set(np.typeDict.values()) - set([np.void])) - dt_ok
for dt in dt_ok:
a = np.empty(shape=10, dtype=dt)
assert not bus.array_encoding_disabled(a)
for dt in dt_bad:
a = np.empty(shape=10, dtype=dt)
assert bus.array_encoding_disabled(a)
@pytest.mark.parametrize('dt', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
@pytest.mark.unit
def test_encode_base64_dict(dt, shape):
a = np.arange(12, dtype=dt)
a.reshape(shape)
d = bus.encode_base64_dict(a)
assert 'shape' in d
assert d['shape'] == a.shape
assert 'dtype' in d
assert d['dtype'] == a.dtype.name
assert '__ndarray__' in d
b64 = base64.b64decode(d['__ndarray__'])
aa = np.fromstring(b64, dtype=d['dtype'])
assert np.array_equal(a, aa)
@pytest.mark.parametrize('dt', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
@pytest.mark.unit
def test_decode_base64_dict(dt, shape):
a = np.arange(12, dtype=dt)
a.reshape(shape)
data = base64.b64encode(a).decode('utf-8')
d = {
'__ndarray__' : data,
'dtype' : a.dtype.name,
'shape' : a.shape
}
aa = bus.decode_base64_dict(d)
assert aa.shape == a.shape
assert aa.dtype.name == a.dtype.name
assert np.array_equal(a, aa)
@pytest.mark.parametrize('dt', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
@pytest.mark.unit
def test_encode_decode_roundtrip(dt, shape):
a = np.arange(12, dtype=dt)
a.reshape(shape)
d = bus.encode_base64_dict(a)
aa = bus.decode_base64_dict(d)
assert np.array_equal(a, aa)
@pytest.mark.parametrize('dt', bus.BINARY_ARRAY_TYPES)
@pytest.mark.parametrize('shape', [(12,), (2, 6), (2,2,3)])
@pytest.mark.unit
def test_encode_binary_dict(dt, shape):
a = np.arange(12, dtype=dt)
a.reshape(shape)
bufs = []
d = bus.encode_binary_dict(a, buffers=bufs)
assert len(bufs) == 1
assert len(bufs[0]) == 2
assert bufs[0][1] == a.tobytes()
assert 'shape' in d
assert d['shape'] == a.shape
assert 'dtype' in d
assert d['dtype'] == a.dtype.name
assert '__buffer__' in d
@pytest.mark.parametrize('cols', [None, [], ['a'], ['a', 'b'], ['a', 'b', 'c']])
@pytest.mark.parametrize('dt1', [np.float32, np.float64, np.int64])
@pytest.mark.parametrize('dt2', [np.float32, np.float64, np.int64])
@pytest.mark.unit
def test_transform_column_source_data_with_buffers(cols, dt1, dt2):
d = dict(a=[1,2,3], b=np.array([4,5,6], dtype=dt1), c=pd.Series([7,8,9], dtype=dt2))
bufs = []
out = bus.transform_column_source_data(d, buffers=bufs, cols=cols)
assert set(out) == (set(d) if cols is None else set(cols))
if 'a' in out:
assert out['a'] == [1,2,3]
for x in ['b', 'c']:
dt = d[x].dtype
if x in out:
if dt in bus.BINARY_ARRAY_TYPES:
assert isinstance(out[x], dict)
assert 'shape' in out[x]
assert out[x]['shape'] == d[x].shape
assert 'dtype' in out[x]
assert out[x]['dtype'] == d[x].dtype.name
assert '__buffer__' in out[x]
else:
assert isinstance(out[x], list)
assert out[x] == list(d[x])
|
bsd-3-clause
|
hunering/demo-code
|
python/books/DLFS/7.4.3-train_convnet.py
|
1
|
1884
|
import numpy as np
import matplotlib.pyplot as plt
from convnet import ConvoNet
from trainer import Trainer
from utils import img_show, load_mnist
import sys, os
sys.path.append(os.getcwd()+'\\books\\dlfs-orig\\')
import common.util as book_util
import layers as book_layers
import dataset.mnist as book_mnist
import ch07.simple_convnet as bool_convnet
#(x_train, t_train), (x_test, t_test) = book_mnist.load_mnist(flatten=False)
(x_train, t_train), (x_test, t_test) = load_mnist()
x_train = x_train.reshape(x_train.shape[0],1,x_train.shape[1],x_train.shape[1])
x_test = x_test.reshape(x_test.shape[0],1,x_test.shape[1],x_test.shape[1])
# Reduce the amount of data if processing takes too long
x_train, t_train = x_train[:5000], t_train[:5000]
x_test, t_test = x_test[:1000], t_test[:1000]
max_epochs = 20
'''
network = bool_convnet.SimpleConvNet(input_dim=(1,28,28),
conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
hidden_size=100, output_size=10, weight_init_std=0.01)
'''
network = ConvoNet(input_shape=(1,28,28),
conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
hidden_size=100, output_size=10, weight_init_std=0.01)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
epochs=max_epochs, mini_batch_size=100,
optimizer='Adam', optimizer_param={'lr': 0.001},
evaluate_sample_num_per_epoch=1000)
trainer.train()
# Plot the training curves
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
plt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
|
gpl-3.0
|
azariven/BioSig_SEAS
|
bin_stable/a.Observation/construct_real_earth_atmosphere_TS.py
|
1
|
4430
|
"""
Construct an Earth transmission spectrum from real TS data from the ACE experiment.
In order to estimate the transmission spectrum for the entire globe,
we can assume it is the average of the summer and winter transmission spectra.
At any given time, half of the globe is in winter and half in summer,
so we can represent the global transmission spectrum as a combination of the summer and winter spectra.
In this instance, we represent the North as winter and the South as summer.
Since the ACE data are collected for:
Arctic Winter
Arctic Summer
Mid Latitude Winter
Mid Latitude Summer
Tropics
we make the simple assumption of slicing the Earth's atmospheric cross section into 8 equal-size regions, with:
Arctic Winter : 90N-67.5N (67.5NW - 67.5NE, 45 degree total)
Mid Latitude Winter x2 : 67.5N-22.5N (67.5NW - 22.5NW, 67.5NE - 22.5NE, 90 degree total)
Tropics x2 : 22.5N-22.5S (22.5NW - 22.5SW, 22.5NE - 22.5SE, 90 degree total)
Mid Latitude Summer x2 : 22.5S-67.5S (22.5SW - 67.5SW, 22.5SE - 67.5SE, 90 degree total)
Arctic Summer : 67.5S-90S (67.5SW - 67.5SE, 45 degree total)
Thus the total transmission spectrum for a given "beam" of the atmosphere is
T_layer = 1./8 (AW+AS+2*(MW+T+MS))
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(DIR, '../..'))
from SEAS_Utils.common_utils.timer import simple_timer
from SEAS_Utils.common_utils.data_saver import save_txt
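# ---------------------------------------------------------------------------
# Editor's note: a hedged, minimal illustration (not part of the original
# pipeline) of the 1/8 weighting described in the module docstring above.
# The argument names are illustrative; the function is never called here.
def _example_global_average(aw, as_, mlw, tro, mls):
    """Global-average transmission for one altitude layer.

    aw, as_        : Arctic Winter / Arctic Summer spectra (weight 1/8 each)
    mlw, tro, mls  : Mid-Latitude Winter, Tropics, Mid-Latitude Summer (weight 2/8 each)
    """
    return (aw + as_ + 2. * (mlw + tro + mls)) / 8.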
def generate_beam():
beam = []
start = 0
while True:
start+=4
end = start+4
beam.append("_%03d-%03dkm_trim.npy"%(start,end))
if end == 124:
break
return beam,len(beam)
def display_one_layer():
inputfilepath = "../../input/absorption_data/ACE_Earth_Spectra/"
Timer = simple_timer()
location = {"as":"ArcticSummer",
"aw":"ArcticWinter",
"mls":"MidLatitudeSummer",
"mlw":"MidLatitudeWinter",
"tro":"Tropics"}
name = "_004-008km_trim.npy"
total = np.zeros(12000)
legends = []
for i,folder in enumerate(location.keys()):
header = "".join([location[folder],name])
xdata,ydata = np.load(os.path.join(inputfilepath,folder,header))
legs, = plt.plot(xdata,ydata+i+1, label=location[folder])
legends.append(legs)
if folder == "aw" or folder == "as":
total += ydata
else:
total += ydata*2
legs, = plt.plot(xdata,total/8.,color="k", label = "Global Average")
legends.append(legs)
plt.legend(handles=legends)
plt.title("ACE Atmosphere Data at 004-008km ")
plt.xlabel("wavenumber (cm^-1)")
plt.ylabel("Transmission Spectra")
plt.show()
def main():
inputfilepath = "../../input/absorption_data/ACE_Earth_Spectra/"
Timer = simple_timer()
location = {"as":"ArcticSummer",
"aw":"ArcticWinter",
"mls":"MidLatitudeSummer",
"mlw":"MidLatitudeWinter",
"tro":"Tropics"}
all_beam, beam_num = generate_beam()
total = np.zeros(12000)
for name in all_beam:
beam_total = np.zeros(12000)
for i,folder in enumerate(location.keys()):
header = "".join([location[folder],name])
xdata,ydata = np.load(os.path.join(inputfilepath,folder,header))
if folder == "aw" or folder == "as":
beam_total += ydata
else:
beam_total += ydata*2
total += (beam_total/8.)
    print(Timer.elapse())
average = -np.log(total/beam_num)*0.001+0.01
data = np.array([10000./xdata[::-1],average[::-1]]).T
    print(data)
#plt.xscale("log")
save_txt("../../input/absorption_data/ACE_Earth_Spectra","Test_Sim_1",data)
plt.plot(10000./xdata,average)
plt.title("ACE Atmosphere Data, Earth Global TS reconstruction ")
plt.xlabel("wavenumber (cm^-1)")
plt.ylabel("(R_P/R_S)^2")
plt.show()
if __name__ == "__main__":
main()
|
gpl-3.0
|
jlegendary/scikit-learn
|
sklearn/manifold/locally_linear.py
|
206
|
25061
|
"""Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
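# ---------------------------------------------------------------------------
# Editor's note: a hedged demonstration sketch, not part of scikit-learn.
# It only illustrates the contract documented above: the returned weights sum
# to one per sample and approximately reconstruct X[i] from its neighbors.
# Names and shapes are illustrative; the function is never called here.
def _barycenter_weights_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)                       # 5 samples in 3-D
    Z = rng.rand(5, 4, 3)                    # 4 "neighbors" per sample
    B = barycenter_weights(X, Z)
    assert np.allclose(B.sum(axis=1), 1.0)   # weights sum to 1 for each sample
    X_rec = np.einsum('ij,ijk->ik', B, Z)    # weighted combination of neighbors
    return np.linalg.norm(X - X_rec)         # reconstruction residual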
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # M = (W - I)' (W - I)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
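# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch, not part of scikit-learn, showing a
# typical call to the functional API documented above on synthetic data.
# Shapes and values are illustrative; the function is never called here.
def _locally_linear_embedding_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 5)                     # 100 samples, 5 features
    Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
    return Y.shape, err                      # -> (100, 2), reconstruction error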
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
    embedding_ : array-like, shape [n_samples, n_components]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with ``embedding_``
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
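# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch, not part of scikit-learn, showing the
# estimator interface defined above on synthetic data. Shapes and values are
# illustrative; the function is never called here.
def _locally_linear_embedding_estimator_demo():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 4)
    lle = LocallyLinearEmbedding(n_neighbors=8, n_components=2)
    X_embedded = lle.fit_transform(X)        # shape (50, 2)
    X_new = lle.transform(rng.rand(3, 4))    # project new points -> shape (3, 2)
    return X_embedded.shape, X_new.shape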
|
bsd-3-clause
|
ilo10/scikit-learn
|
sklearn/linear_model/tests/test_omp.py
|
272
|
7752
|
# Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
|
bsd-3-clause
|
wzbozon/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
70
|
17509
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
|
bsd-3-clause
|
Paolopost/tractome
|
streamshow.py
|
2
|
26015
|
# -*- coding: utf-8 -*-
"""This is the part that connects the logic of the tractome
functionalities to the GUI.
Copyright (c) 2012-2014, Emanuele Olivetti and Eleftherios Garyfallidis
Distributed under the BSD 3-clause license. See COPYING.txt.
"""
import numpy as np
# fos modules
from fos import Actor
from fos.modelmat import screen_to_model
import fos.interact.collision as cll
from fos.coords import img_to_ras_coords, from_matvec
# pyglet module
from pyglet.gl import *
from ctypes import cast, c_int, POINTER
# dipy modules
from dipy.io.dpy import Dpy
from dipy.io.pickles import load_pickle
from dipy.viz.colormap import orient2rgb
from dipy.tracking.vox2track import track_counts
# other
import copy
import cPickle as pickle
# Tk dialogs
import Tkinter, tkFileDialog
# Pyside for windowing
from PySide.QtCore import Qt
# Interaction Logic:
from manipulator import Manipulator
from itertools import chain
import time
from sklearn.cluster import MiniBatchKMeans
from sklearn.neighbors import KDTree
question_message = """
>>>>Track Labeler
P : select/unselect the representative track.
E : expand/collapse the selected streamlines
F : keep selected streamlines rerun QuickBundles and hide everything else.
A : select all representative streamlines which are currently visible.
I : invert selected streamlines to unselected
H : hide/show all representative streamlines.
>>>Mouse
Left Button: keep pressed with dragging - rotation
Scrolling : zoom
Shift + Scrolling : fast zoom
Right Button : panning - translation
Shift + Right Button : fast panning - translation
>>>General
F1 : Fullscreen.
F2 : Next time frame.
F3 : Previous time frame.
F4 : Automatic rotation.
F12 : Reset camera.
ESC: Exit.
? : Print this help information.
"""
def streamline2rgb(streamline):
"""Compute orientation of a streamline and retrieve and appropriate RGB
color to represent it.
"""
# simplest implementation:
tmp = orient2rgb(streamline[0] - streamline[-1])
return tmp
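# Illustrative usage sketch (not from the original module): a streamline lying
# along the x axis maps to the RGB triplet that dipy's orient2rgb associates
# with that orientation, e.g.
#
#     sl = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]])
#     rgb = streamline2rgb(sl)   # three floats in [0, 1]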
def apply_transformation(ijk, affine):
""" Apply a 4x4 affine transformation
Parameters
----------
ijk : array, shape (N, 3)
image coordinates
affine : array, shape (4, 4)
transformation matrix
Returns
-------
xyz : array, shape (N, 3)
world coordinates in RAS (Neurological Convention)
"""
ijk = ijk.T
ijk1 = np.vstack((ijk, np.ones(ijk.shape[1])))
xyz1 = np.dot(affine, ijk1)
xyz = xyz1[:-1, :]
return xyz.T
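# Illustrative usage sketch (not from the original module): a pure translation
# along x shifts every point by the same amount.
#
#     ijk = np.array([[0., 0., 0.], [1., 2., 3.]])
#     affine = np.eye(4)
#     affine[:3, 3] = [10., 0., 0.]
#     apply_transformation(ijk, affine)   # -> [[10., 0., 0.], [11., 2., 3.]]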
def compute_colors(streamlines, alpha):
"""Compute colors for a list of streamlines.
"""
# assert(type(streamlines) == type([]))
tot_vertices = np.sum([len(curve) for curve in streamlines])
color = np.empty((tot_vertices,4), dtype='f4')
counter = 0
for curve in streamlines:
color[counter:counter+len(curve),:3] = streamline2rgb(curve).astype('f4')
counter += len(curve)
color[:,3] = alpha
return color
def compute_buffers(streamlines, alpha, save=False, filename=None):
"""Compute buffers for GL.
"""
tmp = streamlines
if type(tmp) is not type([]):
tmp = streamlines.tolist()
streamlines_buffer = np.ascontiguousarray(np.concatenate(tmp).astype('f4'))
streamlines_colors = np.ascontiguousarray(compute_colors(streamlines, alpha))
streamlines_count = np.ascontiguousarray(np.array([len(curve) for curve in streamlines],dtype='i4'))
streamlines_first = np.ascontiguousarray(np.concatenate([[0],np.cumsum(streamlines_count)[:-1]]).astype('i4'))
tmp = {'buffer': streamlines_buffer,
'colors': streamlines_colors,
'count': streamlines_count,
'first': streamlines_first}
if save:
print "saving buffers to", filename
np.savez_compressed(filename, **tmp)
return tmp
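# Illustrative note (not from the original module): for two streamlines of
# 2 and 3 points, compute_buffers returns a dict with
#     'buffer' -> (5, 3) float32 array of concatenated point coordinates
#     'colors' -> (5, 4) float32 RGBA array (alpha set to the given value)
#     'count'  -> array([2, 3], dtype='i4')   # points per streamline
#     'first'  -> array([0, 2], dtype='i4')   # starting offset of each streamline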
def compute_buffers_representatives(buffers, representative_ids):
"""Compute OpenGL buffers for representatives from tractography
buffers.
"""
print "Creating buffers for representatives."
count = buffers['count'][representative_ids].astype('i4')
first = buffers['first'][representative_ids].astype('i4')
representative_buffers = {'buffer': buffers['buffer'],
'colors': buffers['colors'].copy(),
'count': np.ascontiguousarray(count),
'first': np.ascontiguousarray(first)}
return representative_buffers
def buffer2coordinates(buffer, first, count):
"""Extract an array of streamlines' coordinates from a buffer.
This is meant mainly for the case in which the input 'buffer' comes from
'representative_buffers'.
"""
return np.array([buffer[first[i]:first[i]+count[i]].astype(np.object) \
for i in range(len(first))])
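# Illustrative usage sketch (not from the original module), assuming
# 'streamlines' is a list of (n_points, 3) arrays:
#
#     buffers = compute_buffers(streamlines, alpha=1.0)
#     coords = buffer2coordinates(buffers['buffer'], buffers['first'],
#                                 buffers['count'])
#
# coords is an object array holding one (n_points, 3) array per streamline.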
def mbkm_wrapper(full_dissimilarity_matrix, n_clusters, streamlines_ids):
"""Wrapper of MBKM with API compatible to the Manipulator.
streamlines_ids can be set or list.
"""
sids = np.array(list(streamlines_ids))
dissimilarity_matrix = full_dissimilarity_matrix[sids]
print "MBKM clustering time:",
init = 'random'
mbkm = MiniBatchKMeans(init=init, n_clusters=n_clusters, batch_size=1000,
n_init=10, max_no_improvement=5, verbose=0)
t0 = time.time()
mbkm.fit(dissimilarity_matrix)
t_mini_batch = time.time() - t0
print t_mini_batch
print "exhaustive smarter search of the medoids:",
medoids_exhs = np.zeros(n_clusters, dtype=np.int)
t0 = time.time()
idxs = []
for i, centroid in enumerate(mbkm.cluster_centers_):
idx_i = np.where(mbkm.labels_==i)[0]
if idx_i.size == 0: idx_i = [0]
tmp = dissimilarity_matrix[idx_i] - centroid  # rows of the subset that was actually clustered
medoids_exhs[i] = sids[idx_i[(tmp * tmp).sum(1).argmin()]]
idxs.append(set(sids[idx_i].tolist()))
t_exhs_query = time.time() - t0
print t_exhs_query, "sec"
clusters = dict(zip(medoids_exhs, idxs))
return clusters
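# Illustrative usage sketch (not from the original module): given a precomputed
# dissimilarity matrix D of shape (n_streamlines, n_prototypes),
#
#     clusters = mbkm_wrapper(D, n_clusters=10,
#                             streamlines_ids=set(range(len(D))))
#
# returns a dict mapping each medoid (a streamline id) to the set of streamline
# ids assigned to its cluster.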
class StreamlineLabeler(Actor, Manipulator):
"""The Labeler for streamlines.
"""
def __init__(self, name, buffers, clusters, representative_buffers=None, colors=None, vol_shape=None, representatives_line_width=5.0, streamlines_line_width=2.0, representatives_alpha=1.0, streamlines_alpha=1.0, affine=None, verbose=False, clustering_parameter=None, clustering_parameter_max=None, full_dissimilarity_matrix=None):
"""StreamlineLabeler is meant to explore and select subsets of
the streamlines. The exploration occurs through clustering in
order to simplify the scene.
"""
# super(StreamlineLabeler, self).__init__(name)
Actor.__init__(self, name) # direct call of the __init__ seems better in case of multiple inheritance
if affine is None: self.affine = np.eye(4, dtype = np.float32)
else: self.affine = affine
if vol_shape is not None:
I, J, K = vol_shape
centershift = img_to_ras_coords(np.array([[I/2., J/2., K/2.]]), affine)
centeraffine = from_matvec(np.eye(3), centershift.squeeze())
affine[:3,3] = affine[:3, 3] - centeraffine[:3, 3]
self.glaffine = (GLfloat * 16)(*tuple(affine.T.ravel()))
self.glaff = affine
self.mouse_x=None
self.mouse_y=None
self.buffers = buffers
self.clusters = clusters
self.save_init_set = True
# MBKM:
Manipulator.__init__(self, initial_clusters=clusters, clustering_function=mbkm_wrapper)
# We keep the representative_ids as list to preserve order,
# which is necessary for presentation purposes:
self.representative_ids_ordered = sorted(self.clusters.keys())
self.representatives_alpha = representatives_alpha
# representative buffers:
if representative_buffers is None:
representative_buffers = compute_buffers_representatives(buffers, self.representative_ids_ordered)
self.representatives_buffer = representative_buffers['buffer']
self.representatives_colors = representative_buffers['colors']
self.representatives_first = representative_buffers['first']
self.representatives_count = representative_buffers['count']
self.representatives = buffer2coordinates(self.representatives_buffer,
self.representatives_first,
self.representatives_count)
# full tractography buffers:
self.streamlines_buffer = buffers['buffer']
self.streamlines_colors = buffers['colors']
self.streamlines_first = buffers['first']
self.streamlines_count = buffers['count']
print('MBytes %f' % (self.streamlines_buffer.nbytes/2.**20,))
self.hide_representatives = False
self.expand = False
self.knnreset = False
self.representatives_line_width = representatives_line_width
self.streamlines_line_width = streamlines_line_width
self.vertices = self.streamlines_buffer # this is apparently requested by Actor
self.color_storage = {}
# This is the color of a selected representative.
self.color_selected = np.array([1.0, 1.0, 1.0, 1.0], dtype='f4')
# These are the visualized streamlines.
# (Note: maybe a copy is not strictly necessary here)
self.streamlines_visualized_first = self.streamlines_first.copy()
self.streamlines_visualized_count = self.streamlines_count.copy()
# Clustering:
self.clustering_parameter = clustering_parameter
self.clustering_parameter_max = clustering_parameter_max
self.full_dissimilarity_matrix = full_dissimilarity_matrix
self.cantroi = 0
def set_streamlines_ROIs(self, streamlines_rois_ids):
"""
Set streamlines belonging to ROIs
"""
if not hasattr(self, 'clusters_before_roi') or len(self.clusters_before_roi)==0:
self.clusters_before_roi = self.clusters
self.streamlines_rois = streamlines_rois_ids
if len(streamlines_rois_ids)>0:
# 1) Intersect the ROI streamline ids (defined on the whole tractography)
# with the current clusters. This yields the "same" clusters, restricted
# to the streamlines belonging to the ROI.
clusters_new = {}
for rid in self.clusters_before_roi:
new_cluster_ids = self.clusters_before_roi[rid].intersection(streamlines_rois_ids)
if len(new_cluster_ids) > 0:
clusters_new[rid] = new_cluster_ids
clusters_new[list(new_cluster_ids)[0]] = clusters_new.pop(rid)
self.clusters_reset(clusters_new)
self.recluster_action()
self.hide_representatives = True
self.select_all()
self.expand = True
#
#
# else:
# #Going back to show Clusters before ROI was applied
# # 1) sync self.representative_ids_ordered with original clusters before ROI:
# self.representative_ids_ordered = sorted(self.clusters.keys())
# # 2) change first and count buffers of representatives:
# self.representatives_first = np.ascontiguousarray(self.streamlines_first[self.representative_ids_ordered], dtype='i4')
# self.representatives_count = np.ascontiguousarray(self.streamlines_count[self.representative_ids_ordered], dtype='i4')
# # 3) recompute self.representatives:
# # (this is needed just for get_pointed_representative())
# self.representatives = buffer2coordinates(self.representatives_buffer,
# self.representatives_first,
# self.representatives_count)
# # 4) recompute self.streamlines_visualized_first/count:
# streamlines_ids = list(reduce(chain, [self.clusters[rid] for rid in self.clusters]))
# self.streamlines_visualized_first = np.ascontiguousarray(self.streamlines_first[streamlines_ids], dtype='i4')
# self.streamlines_visualized_count = np.ascontiguousarray(self.streamlines_count[streamlines_ids], dtype='i4')
# self.hide_representatives = False
# self.expand = False
# self.numstream_handler.fire(len(streamlines_ids))
# self.numrep_handler.fire(len(representative_ids))
def set_streamlines_knn(self, streamlines_knn):
"""
Set streamlines for KNN-extension
"""
# 1) Save the clusters available before the extension is applied, so that if the user goes back to k=0 we can return directly to this state.
if self.save_init_set == True :
self.clusters_before_knn = copy.deepcopy(self.clusters)
# This KDTree is computed only on the cluster medoids, for the assignment process. It is computed once and only rebuilt when the initial set of clusters changes.
self.kdtree_medoids= KDTree(self.full_dissimilarity_matrix[self.clusters.keys()])
self.save_init_set = False
clusters_new = copy.deepcopy(self.clusters_before_knn)
clusters_representatives = self.clusters.keys()
# 2) If the number of available clusters is 1, all neighbors will of course automatically be assigned to this cluster
if len(clusters_representatives) == 1:
clusters_new[clusters_representatives[0]].update(streamlines_knn)
# 3) Query the previously computed KDTree to find the nearest medoid (representative) of each streamline to be added.
else:
a2 = self.kdtree_medoids.query(self.full_dissimilarity_matrix[streamlines_knn],k=1, return_distance = False)
for i in range(0, len(streamlines_knn)):
clusters_new[clusters_representatives[a2[i, 0]]].add(streamlines_knn[i])
self.clusters_reset(clusters_new)
self.recluster_action()
self.knnreset = True
self.select_all()
self.expand = True
def set_empty_scene(self):
"""
Hide all elements on the screen if the ROI returns an empty set of streamlines
"""
if not hasattr(self, 'clusters_before_roi') or len(self.clusters_before_roi)==0:
self.clusters_before_roi = self.clusters
self.hide_representatives = True
self.expand = False
def reset_state(self, function):
"""
Show clustering state before any ROI or KNN-extension was applied
"""
if function =='roi':
self.clusters_reset(self.clusters_before_roi)
self.clusters_before_roi = {}
self.recluster_action()
elif function == 'knn':
try:
self.clusters_before_knn
if self.save_init_set == True :
self.clusters_before_knn = copy.deepcopy(self.clusters)
# This KDTree is computed only on the cluster medoids, for the assignment process. It is computed once and only rebuilt when the initial set of clusters changes.
self.kdtree_medoids= KDTree(self.full_dissimilarity_matrix[self.clusters.keys()])
self.save_init_set = False
self.clusters_reset(self.clusters_before_knn)
self.recluster_action()
self.select_all()
self.expand = True
except AttributeError:
pass
self.save_init_set = True
self.hide_representatives = False
def draw(self):
"""Draw virtual and real streamlines.
This is done at every frame and therefore must be real fast.
"""
glDisable(GL_LIGHTING)
# representatives
glEnable(GL_DEPTH_TEST)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_LINE_SMOOTH)
glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
# plot representatives if necessary:
if not self.hide_representatives:
glVertexPointer(3,GL_FLOAT,0,self.representatives_buffer.ctypes.data)
glColorPointer(4,GL_FLOAT,0,self.representatives_colors.ctypes.data)
glLineWidth(self.representatives_line_width)
glPushMatrix()
glMultMatrixf(self.glaffine)
if isinstance(self.representatives_first, tuple): print '>> first Tuple'
if isinstance(self.representatives_count, tuple): print '>> count Tuple'
glMultiDrawArrays(GL_LINE_STRIP,
cast(self.representatives_first.ctypes.data,POINTER(c_int)),
cast(self.representatives_count.ctypes.data,POINTER(c_int)),
len(self.representatives_first))
glPopMatrix()
# plot tractography if necessary:
if self.expand and len(self.selected) > 0:
glVertexPointer(3,GL_FLOAT,0,self.streamlines_buffer.ctypes.data)
glColorPointer(4,GL_FLOAT,0,self.streamlines_colors.ctypes.data)
glLineWidth(self.streamlines_line_width)
glPushMatrix()
glMultMatrixf(self.glaffine)
glMultiDrawArrays(GL_LINE_STRIP,
cast(self.streamlines_visualized_first.ctypes.data,POINTER(c_int)),
cast(self.streamlines_visualized_count.ctypes.data,POINTER(c_int)),
len(self.streamlines_visualized_first))
glPopMatrix()
glDisableClientState(GL_COLOR_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
glLineWidth(1.)
glDisable(GL_DEPTH_TEST)
glDisable(GL_BLEND)
glDisable(GL_LINE_SMOOTH)
glEnable(GL_LIGHTING)
# DO WE NEED THIS METHOD?
def process_messages(self,messages):
"""
"""
msg=messages['key_pressed']
if msg!=None:
self.process_keys(msg,None)
msg=messages['mouse_position']
if msg!=None:
self.process_mouse_position(*msg)
def process_mouse_position(self, x, y):
"""
"""
self.mouse_x = x
self.mouse_y = y
def process_keys(self, symbol, modifiers):
"""Bind actions to key press.
"""
if symbol == Qt.Key_P:
rid = self.get_pointed_representative()
print 'P : pick the representative pointed by the mouse =', rid
self.select_toggle(rid)
elif symbol == Qt.Key_A:
print 'A: select all representatives.'
self.select_all_toggle()
elif symbol == Qt.Key_I:
print 'I: invert selection of representatives.'
self.invert()
elif symbol == Qt.Key_H:
print 'H: Hide/show representatives.'
self.hide_representatives = not self.hide_representatives
elif symbol == Qt.Key_E:
print 'E: Expand/collapse streamlines of selected representatives.'
self.expand_collapse_selected()
elif symbol == Qt.Key_Backspace:
print 'Backspace: Remove unselected representatives.'
self.remove_unselected()
self.save_init_set = True
#elif symbol == Qt.Key_Delete:
# print 'Delete: Remove selected representatives.'
#self.remove_selected()
elif symbol == Qt.Key_B:
print "Go Back one step in the history."
self.simple_history_back_one_step()
elif symbol == Qt.Key_F:
print "Go one step Forward in the history."
self.simple_history_forward_one_step()
def get_pointed_representative(self, min_dist=1e-3):
"""Compute the id of the closest streamline to the mouse pointer.
"""
x, y = self.mouse_x, self.mouse_y
# Define two points in model space from mouse+screen(=0) position and mouse+horizon(=1) position
near = screen_to_model(x, y, 0)
far = screen_to_model(x, y, 1)
# Compute distance of representatives from screen and from the line defined by the two points above
tmp = np.array([cll.mindistance_segment2track_info(near, far, apply_transformation(xyz, self.glaff)) \
for xyz in self.representatives])
line_distance, screen_distance = tmp[:,0], tmp[:,1]
return self.representative_ids_ordered[np.argmin(line_distance + screen_distance)]
def select_action(self, representative_id):
"""
Steps for visualizing a selected representative.
"""
print "select_action:", representative_id
rid_position = self.representative_ids_ordered.index(representative_id)
first = self.representatives_first[rid_position]
count = self.representatives_count[rid_position]
# this check is needed to let select_all_action() work,
# otherwise a previously selected representative would be
# stored as white and never get its original color back.
if representative_id not in self.color_storage:
self.color_storage[representative_id] = self.representatives_colors[first:first+count].copy() # .copy() is mandatory here otherwise that memory is changed by the next line!
self.representatives_colors[first:first+count] = self.color_selected
def unselect_action(self, representative_id):
"""Steps for visualizing an unselected representative.
"""
print "unselect_action:", representative_id
rid_position = self.representative_ids_ordered.index(representative_id)
first = self.representatives_first[rid_position]
count = self.representatives_count[rid_position]
if representative_id in self.color_storage: # check to allow unselect_all_action()
self.representatives_colors[first:first+count] = self.color_storage[representative_id]
self.color_storage.pop(representative_id)
def select_all_action(self):
"""
"""
print "A: select all representatives."
for rid in self.representative_ids_ordered:
self.select_action(rid)
def unselect_all_action(self):
"""
"""
print "A: unselect all representatives."
for rid in self.representative_ids_ordered:
self.unselect_action(rid)
def invert_action(self):
"""
"""
print "I: invert selection of all representatives."
for rid in self.representative_ids_ordered:
if rid in self.selected:
self.select_action(rid)
else:
self.unselect_action(rid)
def expand_collapse_selected_action(self):
"""
"""
print "E: Expand/collapse streamlines of selected representatives."
if self.expand:
print "Expand."
if len(self.selected)>0:
selected_streamlines_ids = list(reduce(chain, [self.clusters[rid] for rid in self.selected]))
self.streamlines_visualized_first = np.ascontiguousarray(self.streamlines_first[selected_streamlines_ids], dtype='i4')
self.streamlines_visualized_count = np.ascontiguousarray(self.streamlines_count[selected_streamlines_ids], dtype='i4')
else:
print "Collapse."
def remove_unselected_action(self):
"""
"""
print "Backspace: remove unselected."
# Note: the following steps need to be done in the given order.
# 0) Restore original color to selected representatives.
self.unselect_all()
self.knnreset = False
# 1) sync self.representative_ids_ordered with new clusters:
self.representative_ids_ordered = sorted(self.clusters.keys())
# 2) change first and count buffers of representatives:
self.representatives_first = np.ascontiguousarray(self.streamlines_first[self.representative_ids_ordered], dtype='i4')
self.representatives_count = np.ascontiguousarray(self.streamlines_count[self.representative_ids_ordered], dtype='i4')
# 3) recompute self.representatives:
# (this is needed just for get_pointed_representative())
self.representatives = buffer2coordinates(self.representatives_buffer,
self.representatives_first,
self.representatives_count)
# 4) recompute self.streamlines_visualized_first/count:
streamlines_ids = list(reduce(chain, [self.clusters[rid] for rid in self.clusters]))
self.streamlines_visualized_first = np.ascontiguousarray(self.streamlines_first[streamlines_ids], dtype='i4')
self.streamlines_visualized_count = np.ascontiguousarray(self.streamlines_count[streamlines_ids], dtype='i4')
def recluster_action(self):
"""
"""
self.select_all()
self.remove_unselected_action()
self.knnreset = False
class ThresholdSelector(object):
"""
"""
def __init__(self, parent, default_value, from_=1, to=500):
"""
"""
self.parent = parent
self.s = Tkinter.Scale(self.parent, from_=from_, to=to, width=25, length=300, orient=Tkinter.HORIZONTAL)
self.s.set(default_value)
self.s.pack()
self.b = Tkinter.Button(self.parent, text='OK', command=self.ok)
self.b.pack(side=Tkinter.BOTTOM)
def ok(self):
self.value = self.s.get()
self.parent.destroy()
|
bsd-3-clause
|
esquivas/Walicxe-2D
|
py/test.py
|
1
|
2851
|
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from walicxe2d_utils import *
path = '/Users/esquivel/Desktop/diable/data/esquivel/Walixce-2D/LE-JET/'
runs = ['Tau40AD','Tau40ISO','Tau40DMC','Tau40AD-2']
nproc = 16
nx = 32
ny = 32
nlevs = 7
neqs = 5
xmax = 4.#6e17
ymax = 1.#1.5e17
nbx = 4
nby = 1
nxtot = 4.*nx*2**(nlevs-1)
dx = xmax/nxtot
firstrun = False
rhomin= 10.
rhomax= 400.
tau = 40.*3.156e7
v0 = 200e5
plt.ion()
for ii in range(np.size(runs)):
if firstrun:
pos = np.zeros(200,dtype = 'd')
time = np.zeros(200,dtype = 'd')
flag = True
for nout in range(0,200):
# read the mesh and get a density (neq = 0), map
mesh = read_mesh(nproc,nx,ny,nlevs,nout,xmax,ymax,nbx,nby,path=path+runs[ii]+'/BIN/')
rho = read_map(nproc, nx, ny, nlevs, neqs, 0, nout, xmax, ymax, nbx, nby, path=path+runs[ii]+'/BIN/')
plt.figure(ii+2, figsize=(10,2.5))
plt.clf()
plt.axes().set_aspect('equal')
# density plot
plt.imshow(rho,origin='lower',interpolation='none',cmap='plasma',
norm=LogNorm(),extent=[0.,xmax,0.,ymax], vmin = rhomin, vmax=rhomax)
plt.title(runs[ii])
# add the mesh as an overlay
for nb in range(mesh.shape[0]):
plt.plot([ mesh[nb,0],mesh[nb,0] ],[ mesh[nb,1],mesh[nb,3] ],color='dimgray',linestyle='solid',alpha=0.25, linewidth=1.)
plt.plot([ mesh[nb,0],mesh[nb,2] ],[ mesh[nb,1],mesh[nb,1] ],color='dimgray',linestyle='solid',alpha=0.25, linewidth=1.)
plt.plot([ mesh[nb,2],mesh[nb,0] ],[ mesh[nb,3],mesh[nb,3] ],color='dimgray',linestyle='solid',alpha=0.25, linewidth=1.)
plt.plot([ mesh[nb,2],mesh[nb,2] ],[ mesh[nb,1],mesh[nb,3] ],color='dimgray',linestyle='solid',alpha=0.25, linewidth=1.)
#. add colorbar
plt.colorbar(shrink=0.8)
#. save to pdf
#plt.savefig('fig'+str(nout).zfill(3)+'.pdf', transparent=True, bbox_inches='tight')
Lx = rho.shape[1]
i = Lx-1
while flag:
if np.any(rho[:,i] > 110.):
print 'position of the head', nout, i
print 'time:', 5.*nout ,'pos:', i*dx*6e17/4
pos[nout] = i*dx*6e17/4
time[nout] = 5.*3.156e7*nout
flag = False
i = i - 1
if (i==0): flag = False
flag= True
np.savez('Rvst-'+runs[ii]+'.npz',pos, time)
else:
plt.figure(0)
if (ii == 0) : plt.clf()
data = np.load('Rvst-'+runs[ii]+'.npz')
time = data['arr_1']/tau
pos = data['arr_0']/v0/tau
plt.plot(time,pos,label=runs[ii], linewidth = 2)
plt.xlabel(r'$t/\tau$')
plt.ylabel(r'$x_h/v_0\,\tau$')
vel = np.zeros(200,dtype = 'd')
for jj in range(1,200):
vel[jj]=(pos[jj]-pos[jj-1])/.125
plt.figure(1)
if (ii == 0) : plt.clf()
plt.plot(time,vel,label=runs[ii], linewidth = 2)
plt.xlabel(r'$t/\tau$')
plt.ylabel(r'$v_h/v_0$')
plt.figure(0) ; plt.legend(loc='lower right')
plt.figure(1) ; plt.legend(loc='lower right')
|
gpl-3.0
|
idlead/scikit-learn
|
sklearn/linear_model/tests/test_theil_sen.py
|
58
|
9948
|
"""
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
yield
devnull.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
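# Illustrative usage sketch (not from the original test module): anything
# written to stdout/stderr inside the block is discarded, e.g.
#
#     with no_stdout_stderr():
#         print("this output is silently dropped")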
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1 / (np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
|
bsd-3-clause
|
NicWayand/xray
|
xarray/test/test_dask.py
|
1
|
11746
|
import numpy as np
import pandas as pd
import xarray as xr
from xarray import Variable, DataArray, Dataset
import xarray.ufuncs as xu
from xarray.core.pycompat import suppress
from . import TestCase, requires_dask
with suppress(ImportError):
import dask
import dask.array as da
def _copy_at_variable_level(arg):
"""We need to copy the argument at the level of xarray.Variable objects, so
that viewing its values does not trigger lazy loading.
"""
if isinstance(arg, Variable):
return arg.copy(deep=False)
elif isinstance(arg, DataArray):
ds = arg.to_dataset(name='__copied__')
return _copy_at_variable_level(ds)['__copied__']
elif isinstance(arg, Dataset):
ds = arg.copy()
for k in list(ds):
ds._variables[k] = ds._variables[k].copy(deep=False)
return ds
else:
assert False
class DaskTestCase(TestCase):
def assertLazyAnd(self, expected, actual, test):
expected_copy = _copy_at_variable_level(expected)
actual_copy = _copy_at_variable_level(actual)
with dask.set_options(get=dask.get):
test(actual_copy, expected_copy)
var = getattr(actual, 'variable', actual)
self.assertIsInstance(var.data, da.Array)
@requires_dask
class TestVariable(DaskTestCase):
def assertLazyAnd(self, expected, actual, test):
expected_copy = expected.copy(deep=False)
actual_copy = actual.copy(deep=False)
with dask.set_options(get=dask.get):
test(actual_copy, expected_copy)
var = getattr(actual, 'variable', actual)
self.assertIsInstance(var.data, da.Array)
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, self.assertVariableIdentical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, self.assertVariableAllClose)
def setUp(self):
self.values = np.random.RandomState(0).randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_var = Variable(('x', 'y'), self.values)
self.lazy_var = Variable(('x', 'y'), self.data)
def test_basics(self):
v = self.lazy_var
self.assertIs(self.data, v.data)
self.assertEqual(self.data.chunks, v.chunks)
self.assertArrayEqual(self.values, v)
def test_copy(self):
self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy())
self.assertLazyAndIdentical(self.eager_var,
self.lazy_var.copy(deep=True))
def test_chunk(self):
for chunks, expected in [(None, ((2, 2), (2, 2, 2))),
(3, ((3, 1), (3, 3))),
({'x': 3, 'y': 3}, ((3, 1), (3, 3))),
({'x': 3}, ((3, 1), (2, 2, 2))),
({'x': (3, 1)}, ((3, 1), (2, 2, 2)))]:
rechunked = self.lazy_var.chunk(chunks)
self.assertEqual(rechunked.chunks, expected)
self.assertLazyAndIdentical(self.eager_var, rechunked)
def test_indexing(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0], v[0])
self.assertLazyAndIdentical(u[:1], v[:1])
self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]])
with self.assertRaisesRegexp(TypeError, 'stored in a dask array'):
v[:1] = 0
def test_squeeze(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze())
def test_equals(self):
v = self.lazy_var
self.assertTrue(v.equals(v))
self.assertIsInstance(v.data, da.Array)
self.assertTrue(v.identical(v))
self.assertIsInstance(v.data, da.Array)
def test_transpose(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.T, v.T)
def test_shift(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2))
self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2))
self.assertEqual(v.data.chunks, v.shift(x=1).data.chunks)
def test_roll(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2))
self.assertEqual(v.data.chunks, v.roll(x=1).data.chunks)
def test_unary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(-u, -v)
self.assertLazyAndIdentical(abs(u), abs(v))
self.assertLazyAndIdentical(u.round(), v.round())
def test_binary_op(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(2 * u, 2 * v)
self.assertLazyAndIdentical(u + u, v + v)
self.assertLazyAndIdentical(u[0] + u, v[0] + v)
def test_reduce(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(u.std(), v.std())
self.assertLazyAndAllClose(u.argmax(dim='x'), v.argmax(dim='x'))
self.assertLazyAndAllClose((u > 1).any(), (v > 1).any())
self.assertLazyAndAllClose((u < 1).all('x'), (v < 1).all('x'))
with self.assertRaisesRegexp(NotImplementedError, 'dask'):
v.prod()
with self.assertRaisesRegexp(NotImplementedError, 'dask'):
v.median()
def test_missing_values(self):
values = np.array([0, 1, np.nan, 3])
data = da.from_array(values, chunks=(2,))
eager_var = Variable('x', values)
lazy_var = Variable('x', data)
self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))
self.assertLazyAndIdentical(Variable('x', range(4)), lazy_var.fillna(2))
self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())
def test_concat(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], 'x'))
self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], 'x'))
self.assertLazyAndIdentical(
u[:3], Variable.concat([v[[0, 2]], v[[1]]], 'x', positions=[[0, 2], [1]]))
def test_missing_methods(self):
v = self.lazy_var
try:
v.argsort()
except NotImplementedError as err:
self.assertIn('dask', str(err))
try:
v[0].item()
except NotImplementedError as err:
self.assertIn('dask', str(err))
def test_univariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
def test_bivariate_ufunc(self):
u = self.eager_var
v = self.lazy_var
self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0))
self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v))
@requires_dask
class TestDataArrayAndDataset(DaskTestCase):
def assertLazyAndIdentical(self, expected, actual):
self.assertLazyAnd(expected, actual, self.assertDataArrayIdentical)
def assertLazyAndAllClose(self, expected, actual):
self.assertLazyAnd(expected, actual, self.assertDataArrayAllClose)
def setUp(self):
self.values = np.random.randn(4, 6)
self.data = da.from_array(self.values, chunks=(2, 2))
self.eager_array = DataArray(self.values, dims=('x', 'y'), name='foo')
self.lazy_array = DataArray(self.data, dims=('x', 'y'), name='foo')
def test_rechunk(self):
chunked = self.eager_array.chunk({'x': 2}).chunk({'y': 2})
self.assertEqual(chunked.chunks, ((2,) * 2, (2,) * 3))
def test_new_chunk(self):
chunked = self.eager_array.chunk()
self.assertTrue(chunked.data.name.startswith('xarray-<this-array>'))
def test_lazy_dataset(self):
lazy_ds = Dataset({'foo': (('x', 'y'), self.data)})
self.assertIsInstance(lazy_ds.foo.variable.data, da.Array)
def test_lazy_array(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(u, v)
self.assertLazyAndAllClose(-u, -v)
self.assertLazyAndAllClose(u.T, v.T)
self.assertLazyAndAllClose(u.mean(), v.mean())
self.assertLazyAndAllClose(1 + u, 1 + v)
actual = xr.concat([v[:2], v[2:]], 'x')
self.assertLazyAndAllClose(u, actual)
def test_groupby(self):
u = self.eager_array
v = self.lazy_array
expected = u.groupby('x').mean()
actual = v.groupby('x').mean()
self.assertLazyAndAllClose(expected, actual)
def test_groupby_first(self):
u = self.eager_array
v = self.lazy_array
for coords in [u.coords, v.coords]:
coords['ab'] = ('x', ['a', 'a', 'b', 'b'])
with self.assertRaisesRegexp(NotImplementedError, 'dask'):
v.groupby('ab').first()
expected = u.groupby('ab').first()
actual = v.groupby('ab').first(skipna=False)
self.assertLazyAndAllClose(expected, actual)
def test_reindex(self):
u = self.eager_array
v = self.lazy_array
for kwargs in [{'x': [2, 3, 4]},
{'x': [1, 100, 2, 101, 3]},
{'x': [2.5, 3, 3.5], 'y': [2, 2.5, 3]}]:
expected = u.reindex(**kwargs)
actual = v.reindex(**kwargs)
self.assertLazyAndAllClose(expected, actual)
def test_to_dataset_roundtrip(self):
u = self.eager_array
v = self.lazy_array
expected = u.assign_coords(x=u['x'])
self.assertLazyAndIdentical(expected, v.to_dataset('x').to_array('x'))
def test_merge(self):
def duplicate_and_merge(array):
return xr.merge([array, array.rename('bar')]).to_array()
expected = duplicate_and_merge(self.eager_array)
actual = duplicate_and_merge(self.lazy_array)
self.assertLazyAndIdentical(expected, actual)
def test_ufuncs(self):
u = self.eager_array
v = self.lazy_array
self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
def test_where_dispatching(self):
a = np.arange(10)
b = a > 3
x = da.from_array(a, 5)
y = da.from_array(b, 5)
expected = DataArray(a).where(b)
self.assertLazyAndIdentical(expected, DataArray(a).where(y))
self.assertLazyAndIdentical(expected, DataArray(x).where(b))
self.assertLazyAndIdentical(expected, DataArray(x).where(y))
def test_simultaneous_compute(self):
ds = Dataset({'foo': ('x', range(5)),
'bar': ('x', range(5))}).chunk()
count = [0]
def counting_get(*args, **kwargs):
count[0] += 1
return dask.get(*args, **kwargs)
with dask.set_options(get=counting_get):
ds.load()
self.assertEqual(count[0], 1)
def test_stack(self):
data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4))
arr = DataArray(data, dims=('w', 'x', 'y'))
stacked = arr.stack(z=('x', 'y'))
z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)],
names=['x', 'y'])
expected = DataArray(data.reshape(2, -1), {'w': [0, 1], 'z': z},
dims=['w', 'z'])
assert stacked.data.chunks == expected.data.chunks
self.assertLazyAndIdentical(expected, stacked)
def test_dot(self):
eager = self.eager_array.dot(self.eager_array[0])
lazy = self.lazy_array.dot(self.lazy_array[0])
self.assertLazyAndAllClose(eager, lazy)
|
apache-2.0
|
jorik041/scikit-learn
|
benchmarks/bench_lasso.py
|
297
|
3305
|
"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
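# Illustrative call with a single small configuration (hypothetical numbers,
# not part of the benchmark itself); Lasso and LassoLars must be imported
# first, as done in the __main__ block below:
#     lasso_t, lars_t = compute_bench(alpha=0.01, n_samples=[500],
#                                     n_features=[100], precompute=True)
# Each returned list holds one fit time per (n_samples, n_features) pair.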
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
hlin117/statsmodels
|
statsmodels/tools/tests/test_grouputils.py
|
31
|
11494
|
import numpy as np
import pandas as pd
from statsmodels.tools.grouputils import Grouping
from statsmodels.tools.tools import categorical
from statsmodels.datasets import grunfeld, anes96
from pandas.util import testing as ptesting
class CheckGrouping(object):
def test_reindex(self):
# smoke test
self.grouping.reindex(self.grouping.index)
def test_count_categories(self):
self.grouping.count_categories(level=0)
np.testing.assert_equal(self.grouping.counts, self.expected_counts)
def test_sort(self):
# data frame
sorted_data, index = self.grouping.sort(self.data)
expected_sorted_data = self.data.sort_index()
ptesting.assert_frame_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.DataFrame))
np.testing.assert_(not index.equals(self.grouping.index))
# make sure it copied
if hasattr(sorted_data, 'equals'): # newer pandas
np.testing.assert_(not sorted_data.equals(self.data))
# 2d arrays
sorted_data, index = self.grouping.sort(self.data.values)
np.testing.assert_array_equal(sorted_data,
expected_sorted_data.values)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
# 1d series
series = self.data[self.data.columns[0]]
sorted_data, index = self.grouping.sort(series)
expected_sorted_data = series.sort_index()
ptesting.assert_series_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.Series))
if hasattr(sorted_data, 'equals'):
np.testing.assert_(not sorted_data.equals(series))
# 1d array
array = series.values
sorted_data, index = self.grouping.sort(array)
expected_sorted_data = series.sort_index().values
np.testing.assert_array_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
def test_transform_dataframe(self):
names = self.data.index.names
transformed_dataframe = self.grouping.transform_dataframe(
self.data,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
if len(names) > 1:
transformed_dataframe = self.grouping.transform_dataframe(
self.data, lambda x : x.mean(),
level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
def test_transform_array(self):
names = self.data.index.names
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
if len(names) > 1:
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(), level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
def test_transform_slices(self):
names = self.data.index.names
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=0)
expected = self.data.reset_index().groupby(names[0]).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
if len(names) > 1:
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=1)
expected = self.data.reset_index().groupby(names[1]
).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
def test_dummies_groups(self):
# smoke test, calls dummy_sparse under the hood
self.grouping.dummies_groups()
if len(self.grouping.group_names) > 1:
self.grouping.dummies_groups(level=1)
def test_dummy_sparse(self):
data = self.data
self.grouping.dummy_sparse()
expected = categorical(data.index.get_level_values(0).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(), expected)
if len(self.grouping.group_names) > 1:
self.grouping.dummy_sparse(level=1)
expected = categorical(data.index.get_level_values(1).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(),
expected)
class TestMultiIndexGrouping(CheckGrouping):
@classmethod
def setupClass(cls):
grun_data = grunfeld.load_pandas().data
multi_index_data = grun_data.set_index(['firm', 'year'])
multi_index_panel = multi_index_data.index
cls.grouping = Grouping(multi_index_panel)
cls.data = multi_index_data
cls.expected_counts = [20] * 11
class TestIndexGrouping(CheckGrouping):
@classmethod
def setupClass(cls):
grun_data = grunfeld.load_pandas().data
index_data = grun_data.set_index(['firm'])
index_group = index_data.index
cls.grouping = Grouping(index_group)
cls.data = index_data
cls.expected_counts = [20] * 11
def test_init_api():
# make a multi-index panel
grun_data = grunfeld.load_pandas().data
multi_index_panel = grun_data.set_index(['firm', 'year']).index
grouping = Grouping(multi_index_panel)
# check group_names
np.testing.assert_array_equal(grouping.group_names, ['firm', 'year'])
# check shape
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# check index_int
np.testing.assert_array_equal(grouping.labels,
[[ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
grouping = Grouping(multi_index_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
# make a multi-index grouping
anes_data = anes96.load_pandas().data
multi_index_groups = anes_data.set_index(['educ', 'income',
'TVnews']).index
grouping = Grouping(multi_index_groups)
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# make a list multi-index panel
list_panel = multi_index_panel.tolist()
grouping = Grouping(list_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# make a list multi-index grouping
list_groups = multi_index_groups.tolist()
grouping = Grouping(list_groups, names=['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# single-variable index grouping
index_group = multi_index_panel.get_level_values(0)
grouping = Grouping(index_group)
    # the original multi_index_panel had its name changed in place above
np.testing.assert_array_equal(grouping.group_names, ['firms'])
np.testing.assert_array_equal(grouping.index_shape, (220,))
# single variable list grouping
list_group = multi_index_panel.get_level_values(0).tolist()
grouping = Grouping(list_group)
np.testing.assert_array_equal(grouping.group_names, ["group0"])
np.testing.assert_array_equal(grouping.index_shape, 11*20)
# test generic group names
grouping = Grouping(list_groups)
np.testing.assert_array_equal(grouping.group_names,
['group0', 'group1', 'group2'])
|
bsd-3-clause
|
kumarkrishna/sympy
|
sympy/plotting/tests/test_plot.py
|
43
|
8577
|
from sympy import (pi, sin, cos, Symbol, Integral, summation, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max, Piecewise)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities.pytest import skip, raises
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from sympy.core.decorators import wraps
from tempfile import NamedTemporaryFile
import os
import sys
class MockPrint(object):
def write(self, s):
pass
def disable_print(func, *args, **kwargs):
@wraps(func)
def wrapper(*args, **kwargs):
sys.stdout = MockPrint()
func(*args, **kwargs)
sys.stdout = sys.__stdout__
return wrapper
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
class TmpFileManager:
tmp_files = []
@classmethod
def tmp_file(cls, name=''):
cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
return cls.tmp_files[-1]
@classmethod
def cleanup(cls):
        # use an explicit loop so the files are actually removed on Python 3,
        # where map() is evaluated lazily
        for tmp_file in cls.tmp_files:
            os.remove(tmp_file)
def plot_and_save(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
###
# Examples from the 'introduction' notebook
###
p = plot(x)
p = plot(x*sin(x), x*cos(x))
p.extend(p)
p[0].line_color = lambda a: a
p[1].line_color = 'b'
p.title = 'Big title'
p.xlabel = 'the x axis'
p[1].label = 'straight line'
p.legend = True
p.aspect_ratio = (1, 1)
p.xlim = (-15, 20)
p.save(tmp_file('%s_basic_options_and_colors' % name))
p.extend(plot(x + 1))
p.append(plot(x + 3, x**2)[1])
p.save(tmp_file('%s_plot_extend_append' % name))
p[2] = plot(x**2, (x, -2, 3))
p.save(tmp_file('%s_plot_setitem' % name))
p = plot(sin(x), (x, -2*pi, 4*pi))
p.save(tmp_file('%s_line_explicit' % name))
p = plot(sin(x))
p.save(tmp_file('%s_line_default_range' % name))
p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
p.save(tmp_file('%s_line_multiple_range' % name))
raises(ValueError, lambda: plot(x, y))
p = plot(Piecewise((1, x > 0), (0, True)),(x,-1,1))
p.save(tmp_file('%s_plot_piecewise' % name))
#parametric 2d plots.
#Single plot with default range.
plot_parametric(sin(x), cos(x)).save(tmp_file())
#Single plot with range.
p = plot_parametric(sin(x), cos(x), (x, -5, 5))
p.save(tmp_file('%s_parametric_range' % name))
#Multiple plots with same range.
p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
p.save(tmp_file('%s_parametric_multiple' % name))
#Multiple plots with different ranges.
p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
p.save(tmp_file('%s_parametric_multiple_ranges' % name))
#depth of recursion specified.
p = plot_parametric(x, sin(x), depth=13)
p.save(tmp_file('%s_recursion_depth' % name))
#No adaptive sampling.
p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
p.save(tmp_file('%s_adaptive' % name))
#3d parametric plots
p = plot3d_parametric_line(sin(x), cos(x), x)
p.save(tmp_file('%s_3d_line' % name))
p = plot3d_parametric_line(
(sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
p.save(tmp_file('%s_3d_line_multiple' % name))
p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
p.save(tmp_file('%s_3d_line_points' % name))
# 3d surface single plot.
p = plot3d(x * y)
p.save(tmp_file('%s_surface' % name))
# Multiple 3D plots with same range.
p = plot3d(-x * y, x * y, (x, -5, 5))
p.save(tmp_file('%s_surface_multiple' % name))
# Multiple 3D plots with different ranges.
p = plot3d(
(x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_surface_multiple_ranges' % name))
# Single Parametric 3D plot
p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
p.save(tmp_file('%s_parametric_surface' % name))
# Multiple Parametric 3D plots.
p = plot3d_parametric_surface(
(x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
(sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
p.save(tmp_file('%s_parametric_surface' % name))
###
# Examples from the 'colors' notebook
###
p = plot(sin(x))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_line_arity2' % name))
p = plot(x*sin(x), x*cos(x), (x, 0, 10))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_param_line_arity1' % name))
p[0].line_color = lambda a, b: a
p.save(tmp_file('%s_colors_param_line_arity2a' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_param_line_arity2b' % name))
p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
cos(x) + 0.1*cos(x)*cos(7*x),
0.1*sin(7*x),
(x, 0, 2*pi))
p[0].line_color = lambda a: sin(4*a)
p.save(tmp_file('%s_colors_3d_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_3d_line_arity2' % name))
p[0].line_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_3d_line_arity3' % name))
p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_surface_arity1' % name))
p[0].surface_color = lambda a, b: b
p.save(tmp_file('%s_colors_surface_arity2' % name))
p[0].surface_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_surface_arity3a' % name))
p[0].surface_color = lambda a, b, c: sqrt((a - 3*pi)**2 + b**2)
p.save(tmp_file('%s_colors_surface_arity3b' % name))
p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
(x, -1, 1), (y, -1, 1))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_param_surf_arity1' % name))
p[0].surface_color = lambda a, b: a*b
p.save(tmp_file('%s_colors_param_surf_arity2' % name))
p[0].surface_color = lambda a, b, c: sqrt(a**2 + b**2 + c**2)
p.save(tmp_file('%s_colors_param_surf_arity3' % name))
###
# Examples from the 'advanced' notebook
###
i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
p = plot(i, (y, 1, 5))
p.save(tmp_file('%s_advanced_integral' % name))
s = summation(1/x**y, (x, 1, oo))
p = plot(s, (y, 2, 10))
p.save(tmp_file('%s_advanced_inf_sum' % name))
p = plot(summation(1/x, (x, 1, y)), (y, 2, 10), show=False)
p[0].only_integers = True
p[0].steps = True
p.save(tmp_file('%s_advanced_fin_sum' % name))
###
# Test expressions that can not be translated to np and generate complex
# results.
###
plot(sin(x) + I*cos(x)).save(tmp_file())
plot(sqrt(sqrt(-x))).save(tmp_file())
plot(LambertW(x)).save(tmp_file())
plot(sqrt(LambertW(x))).save(tmp_file())
#Characteristic function of a StudentT distribution with nu=10
plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
+ meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
x = Symbol('x')
f = lambdify([x], Max(x, 5))
# XXX should f be tested? If f(2) is attempted, an
# error is raised because a complex produced during wrapping of the arg
# is being compared with an int.
assert Max(2, 5) == 5
assert Max(5, 7) == 7
x = Symbol('x-3')
f = lambdify([x], x + 1)
assert f(1) == 2
@disable_print
def test_append_issue_7140():
x = Symbol('x')
p1 = plot(x)
p2 = plot(x**2)
p3 = plot(x + 2)
# append a series
p2.append(p1[0])
assert len(p2._series) == 2
with raises(TypeError):
p1.append(p2)
with raises(TypeError):
p1.append(p2._series)
|
bsd-3-clause
|
mantidproject/mantid
|
qt/applications/workbench/workbench/plotting/plotscriptgenerator/legend.py
|
3
|
6968
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from matplotlib import rcParams
from matplotlib.font_manager import FontProperties
from mantid.plots.legend import LegendProperties, convert_color_to_hex
from workbench.plotting.plotscriptgenerator.utils import convert_args_to_string
# Default values of all options that are accessible via the legend tab in the plot settings.
mpl_default_kwargs = {
'visible': True,
'title': '',
'background_color': convert_color_to_hex(rcParams['axes.facecolor']), # inherits from axes by default
'edge_color': convert_color_to_hex(rcParams['legend.edgecolor']),
'transparency': rcParams['legend.framealpha'],
'entries_font': 'DejaVu Sans',
'entries_size': rcParams['legend.fontsize'],
'entries_color': '#000000',
'title_font': 'DejaVu Sans',
'title_size': rcParams['axes.labelsize'], # Uses axes size by default
'title_color': '#000000',
'marker_size': rcParams['legend.handlelength'],
'box_visible': rcParams['legend.frameon'],
'shadow': rcParams['legend.shadow'],
'round_edges': rcParams['legend.fancybox'],
'columns': 1,
'column_spacing': rcParams['legend.columnspacing'],
'label_spacing': rcParams['legend.labelspacing'],
'marker_position': "Left of Entries",
'markers': rcParams['legend.numpoints'],
'border_padding': rcParams['legend.borderpad'],
'marker_label_padding': rcParams['legend.handletextpad']
}
# Dictionary to convert from the mantid legend interface to matplotlib legend argument names.
MANTID_TO_MPL = {
'background_color': 'facecolor',
'edge_color': 'edgecolor',
'transparency': 'framealpha',
'entries_size': 'fontsize',
'columns': 'ncol',
'markers': 'numpoints',
'marker_position': 'markerfirst',
'box_visible': 'frameon',
'round_edges': 'fancybox',
'shadow': 'shadow',
'title': 'title',
'border_padding': 'borderpad',
'label_spacing': 'labelspacing',
'marker_size': 'handlelength',
'marker_label_padding': 'handletextpad',
'column_spacing': 'columnspacing'
}
def generate_legend_commands(legend):
"""
Generates a string containing a comma separated list of kwargs to set legend properties.
"""
kwargs = get_legend_command_kwargs(legend)
return convert_args_to_string([], kwargs)
def generate_title_font_commands(legend, legend_object_var):
"""
Generate commands for setting properties for the legend title font.
"""
title_commands = []
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
if 'title_font' in kwargs:
title_commands.append(legend_object_var + ".get_title().set_fontname('" + kwargs['title_font'] + "')")
if 'title_color' in kwargs:
title_commands.append(legend_object_var + ".get_title().set_color('" + kwargs['title_color'] + "')")
if 'title_size' in kwargs:
title_commands.append(legend_object_var + ".get_title().set_fontsize('" + str(kwargs['title_size']) + "')")
return title_commands
def generate_label_font_commands(legend, legend_object_var):
"""
Generate python commands for setting the legend text label properties. The size is not present here because it is
already included in the list of legend properties.
"""
label_commands = []
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
if 'entries_font' in kwargs:
label_commands.append("[label.set_fontname('" + kwargs['entries_font']
+ "') for label in " + legend_object_var + ".get_texts()]")
if 'entries_color' in kwargs:
label_commands.append("[label.set_color('" + kwargs['entries_color']
+ "') for label in " + legend_object_var + ".get_texts()]")
return label_commands
def generate_visible_command(legend, legend_object_var):
"""
Returns a command to set the visibility of the legend if it's different to the default value.
It's returned as a list for convenience, so it can be added to the end of a list without checking if it's empty.
"""
visible_command = []
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
if 'visible' in kwargs:
visible_command.append(legend_object_var + ".set_visible(" + str(kwargs['visible']) + ")")
return visible_command
def get_legend_command_kwargs(legend):
"""
Returns a list of matplotlib legend kwargs, removing any that are default values.
"""
kwargs = LegendProperties.from_legend(legend)
_remove_kwargs_if_default(kwargs)
# Convert the kwargs to the matplotlib ones.
return get_mpl_kwargs(kwargs)
def get_mpl_kwargs(kwargs):
"""
Keep only matplotlib legend kwargs, and convert the keys to matplotlib compatible ones.
"""
mpl_kwargs = {}
for key, value in kwargs.items():
if key in MANTID_TO_MPL:
mpl_kwargs[MANTID_TO_MPL[key]] = value
# The markerfirst kwarg is a boolean in matplotlib, so need to convert it.
if 'markerfirst' in mpl_kwargs:
mpl_kwargs['markerfirst'] = mpl_kwargs['markerfirst'] == "Left of Entries"
return mpl_kwargs
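# Illustrative call (hypothetical input values): passing
#     {'background_color': '#ffffff', 'columns': 2, 'marker_position': 'Left of Entries'}
# returns {'facecolor': '#ffffff', 'ncol': 2, 'markerfirst': True}; keys with no
# matplotlib counterpart in MANTID_TO_MPL are dropped.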
def _remove_kwargs_if_default(kwargs):
"""
Remove kwargs from the given dict if they're the default values
"""
for kwarg, default_value in mpl_default_kwargs.items():
if kwargs[kwarg] == default_value:
kwargs.pop(kwarg)
# Font size defaults are string values (e.g. 'medium', 'large', 'x-large'), so we need to convert the defaults to
# point sizes before comparing.
if 'title_size' in kwargs:
if convert_to_point_size(kwargs['title_size']) == convert_to_point_size(mpl_default_kwargs['title_size']):
kwargs.pop('title_size')
if 'entries_size' in kwargs:
if convert_to_point_size(kwargs['entries_size']) == convert_to_point_size(mpl_default_kwargs['entries_size']):
kwargs.pop('entries_size')
# Hex values of colours may not be the same case, so convert to lower before comparing.
if 'background_color' in kwargs:
if kwargs['background_color'].lower() == mpl_default_kwargs['background_color'].lower():
kwargs.pop('background_color')
if 'edge_color' in kwargs:
if kwargs['edge_color'].lower() == mpl_default_kwargs['edge_color'].lower():
kwargs.pop('edge_color')
def convert_to_point_size(font_size):
"""
Convert font size (may be int or string, e.g. 'medium', 'large', ...) to point size.
"""
font = FontProperties()
font.set_size(font_size)
return font.get_size_in_points()
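# For example, with matplotlib's default rcParams convert_to_point_size('medium')
# returns 10.0 and convert_to_point_size(12) returns 12.0; the exact number for a
# named size always follows the active rcParams font settings.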
|
gpl-3.0
|
peterpolidoro/elf
|
tests/volume_to_adc.py
|
4
|
5108
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import matplotlib.pyplot as plot
import numpy
from numpy.polynomial.polynomial import polyfit,polyadd,Polynomial
import yaml
INCHES_PER_ML = 0.078
VOLTS_PER_ADC_UNIT = 0.0049
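# These constants convert the raw measurements used below: a distance in inches
# divided by INCHES_PER_ML gives a volume in ml, and a sensor voltage divided by
# VOLTS_PER_ADC_UNIT gives the corresponding value in ADC units.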
def load_numpy_data(path):
with open(path,'r') as fid:
header = fid.readline().rstrip().split(',')
dt = numpy.dtype({'names':header,'formats':['S25']*len(header)})
numpy_data = numpy.loadtxt(path,dtype=dt,delimiter=",",skiprows=1)
return numpy_data
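# load_numpy_data expects a comma separated file whose first line is a header
# naming the columns (e.g. distance,A1,A9,...); every field is read as a short
# string and converted to float64 by the caller below.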
# -----------------------------------------------------------------------------------------
if __name__ == '__main__':
# Load VA data
data_file = 'hall_effect_data_va.csv'
hall_effect_data_va = load_numpy_data(data_file)
distances_va = numpy.float64(hall_effect_data_va['distance'])
A1_VA = numpy.float64(hall_effect_data_va['A1'])
A9_VA = numpy.float64(hall_effect_data_va['A9'])
A4_VA = numpy.float64(hall_effect_data_va['A4'])
A12_VA = numpy.float64(hall_effect_data_va['A12'])
A2_VA = numpy.float64(hall_effect_data_va['A2'])
A10_VA = numpy.float64(hall_effect_data_va['A10'])
A5_VA = numpy.float64(hall_effect_data_va['A5'])
A13_VA = numpy.float64(hall_effect_data_va['A13'])
# Massage VA data
volumes_va = distances_va/INCHES_PER_ML
A1_VA = numpy.reshape(A1_VA,(-1,1))
A9_VA = numpy.reshape(A9_VA,(-1,1))
A4_VA = numpy.reshape(A4_VA,(-1,1))
A12_VA = numpy.reshape(A12_VA,(-1,1))
A2_VA = numpy.reshape(A2_VA,(-1,1))
A10_VA = numpy.reshape(A10_VA,(-1,1))
A5_VA = numpy.reshape(A5_VA,(-1,1))
A13_VA = numpy.reshape(A13_VA,(-1,1))
data_va = numpy.hstack((A1_VA,A9_VA,A4_VA,A12_VA,A2_VA,A10_VA,A5_VA,A13_VA))
data_va = data_va/VOLTS_PER_ADC_UNIT
# Load OA data
data_file = 'hall_effect_data_oa.csv'
hall_effect_data_oa = load_numpy_data(data_file)
distances_oa = numpy.float64(hall_effect_data_oa['distance'])
A9_OA = numpy.float64(hall_effect_data_oa['A9'])
A10_OA = numpy.float64(hall_effect_data_oa['A10'])
A11_OA = numpy.float64(hall_effect_data_oa['A11'])
A12_OA = numpy.float64(hall_effect_data_oa['A12'])
# Massage OA data
volumes_oa = distances_oa/INCHES_PER_ML
A9_OA = numpy.reshape(A9_OA,(-1,1))
A10_OA = numpy.reshape(A10_OA,(-1,1))
A11_OA = numpy.reshape(A11_OA,(-1,1))
A12_OA = numpy.reshape(A12_OA,(-1,1))
data_oa = numpy.hstack((A9_OA,A10_OA,A11_OA,A12_OA))
data_oa = data_oa/VOLTS_PER_ADC_UNIT
# Create figure
fig = plot.figure()
fig.suptitle('hall effect sensors',fontsize=14,fontweight='bold')
fig.subplots_adjust(top=0.85)
colors = ['b','g','r','c','m','y','k','b']
markers = ['o','o','o','o','o','o','o','^']
# Axis 1
ax1 = fig.add_subplot(121)
for column_index in range(0,data_va.shape[1]):
color = colors[column_index]
marker = markers[column_index]
ax1.plot(volumes_va,data_va[:,column_index],marker=marker,linestyle='--',color=color)
for column_index in range(0,data_oa.shape[1]):
color = colors[column_index]
marker = markers[column_index]
ax1.plot(volumes_oa,data_oa[:,column_index],marker=marker,linestyle='--',color=color)
ax1.set_xlabel('volume (ml)')
ax1.set_ylabel('mean signals (ADC units)')
ax1.grid(True)
# Axis 2
for column_index in range(0,data_va.shape[1]):
data_va[:,column_index] -= data_va[:,column_index].min()
MAX_VA = 120
data_va = data_va[numpy.all(data_va<MAX_VA,axis=1)]
length = data_va.shape[0]
volumes_va = volumes_va[-length:]
# for column_index in range(0,data_oa.shape[1]):
# data_oa[:,column_index] -= data_oa[:,column_index].max()
ax2 = fig.add_subplot(122)
for column_index in range(0,data_va.shape[1]):
color = colors[column_index]
marker = markers[column_index]
ax2.plot(volumes_va,data_va[:,column_index],marker=marker,linestyle='--',color=color)
# for column_index in range(0,data_oa.shape[1]):
# color = colors[column_index]
# marker = markers[column_index]
# ax2.plot(data_oa[:,column_index],volumes_oa,marker=marker,linestyle='--',color=color)
ax2.set_xlabel('volume (ml)')
ax2.set_ylabel('offset mean signals (ADC units)')
ax2.grid(True)
order = 3
sum_va = None
for column_index in range(0,data_va.shape[1]):
coefficients_va = polyfit(volumes_va,data_va[:,column_index],order)
if sum_va is None:
sum_va = coefficients_va
else:
sum_va = polyadd(sum_va,coefficients_va)
average_va = sum_va/data_va.shape[1]
round_digits = 8
average_va = [round(i,round_digits) for i in average_va]
with open('volume_to_adc_va.yaml', 'w') as f:
yaml.dump(average_va, f, default_flow_style=False)
poly_va = Polynomial(average_va)
ys_va = poly_va(volumes_va)
ax2.plot(volumes_va,ys_va,'r',linewidth=3)
ax2.text(0.5,110,r'$s = c_0 + c_1v + c_2v^2 + c_3v^3$',fontsize=20)
ax2.text(0.5,100,str(average_va),fontsize=18,color='r')
plot.show()
|
bsd-3-clause
|
cbuntain/UMD_HCIL_TREC2015
|
src/main/python/topicFilter/scenario_b_type_b.py
|
1
|
3325
|
#!/usr/bin/python
import codecs
import json
import re
import sys
import time
from nltk.stem import WordNetLemmatizer
import pandas as pd
minKeywordCount = 1
maxPerDay = 100
if ( len(sys.argv) < 5 ):
print "Usage: %s <trec_topics.json> <sparkTrecOutput.csv> <output_file.csv> <runtag>" % (sys.argv[0])
exit(1)
topicsFilePath = sys.argv[1]
sparkCsvFilePath = sys.argv[2]
outputPath = sys.argv[3]
runtag = sys.argv[4]
topicsJsonObj = None
with codecs.open(topicsFilePath, "r", "utf-8") as f:
topicsJsonObj = json.load(f)
wordToTopicMap = {}
topicTimeMap = {}
for topic in topicsJsonObj:
topicTitle = topic["title"]
topicNum = topic["num"]
tokens = topic["tokens"]
for token in tokens:
if ( token not in wordToTopicMap ):
wordToTopicMap[token] = [(topicNum,topicTitle)]
else:
wordToTopicMap[token].append((topicNum,topicTitle))
topicTimeMap[topicNum] = {}
wnl = WordNetLemmatizer()
specCharRegex = re.compile(r"[^a-zA-Z0-9\s]")  # strip everything except alphanumerics and whitespace
outputRows = []
with codecs.open(sparkCsvFilePath, "r", "utf-8") as f:
df = pd.read_csv(sparkCsvFilePath, header=None)
for (id, row) in df.iterrows():
topicNums = row[0]
captureTime = row[1]
tweetId = row[2]
tweetText = row[3]
gmTime = time.gmtime(captureTime)
timeTuple = (gmTime.tm_year, gmTime.tm_mon, gmTime.tm_mday)
timeStr = "%04d%02d%02d" % (gmTime.tm_year, gmTime.tm_mon, gmTime.tm_mday)
cleanTokens = specCharRegex.sub(" ", tweetText.lower(), count=0)
tokens = set([wnl.lemmatize(x) for x in cleanTokens.split(" ")])
localTopicCountMap = {}
localTopics = []
for token in tokens:
if ( token in wordToTopicMap ):
for x in wordToTopicMap[token]:
thisTopicNum = x[0]
if ( thisTopicNum not in localTopicCountMap ):
localTopics.append(x)
localTopicCountMap[thisTopicNum] = 1
else:
localTopicCountMap[thisTopicNum] += 1
for localTopic in localTopics:
if ( localTopicCountMap[localTopic[0]] < minKeywordCount ):
continue
if ( timeTuple in topicTimeMap[localTopic[0]] and len(topicTimeMap[localTopic[0]][timeTuple]) >= maxPerDay ):
continue
if ( timeTuple not in topicTimeMap[localTopic[0]] ):
topicTimeMap[localTopic[0]][timeTuple] = [tweetId]
else:
topicTimeMap[localTopic[0]][timeTuple].append(tweetId)
item = {
"topic":localTopic[0],
"title": localTopic[1],
"time":captureTime,
"date":timeStr,
"id":tweetId,
"text":tweetText,
"runtag":runtag,
"q0":"Q0",
"rank": 1,
"score": 1.0,
}
outputRows.append(item)
outputDf = pd.DataFrame(outputRows)
# YYYYMMDD topic_id Q0 tweet_id rank score runtag
# outputDf.to_csv(outputPath, columns=["topic", "title", "time", "date", "id", "text"], index=False)
outputDf.to_csv(outputPath, columns=["date", "topic", "q0", "id", "rank", "score", "runtag"], index=False, sep="\t")
|
apache-2.0
|
markus-antero/Stock
|
data/finance/worldbank.py
|
1
|
5580
|
'''
Created on 18.8.2017
- https://datahelpdesk.worldbank.org/knowledgebase/articles/889464-wbopendata-stata-module-to-access-world-bank-data
- https://datahelpdesk.worldbank.org/knowledgebase/topics/19286-world-development-indicators-wdi
- https://datahelpdesk.worldbank.org/knowledgebase/topics/125589-developer-information
- https://datahelpdesk.worldbank.org/knowledgebase/articles/902061-climate-data-api
- https://datahelpdesk.worldbank.org/knowledgebase/articles/898614-api-aggregates-regions-and-income-levels
@author: Markus.Walden
'''
import wbdata
import pandas as pd
import sqlalchemy as sqla
import csv
import datetime
import sys
from data.finance import countryCodes, worldbankConfFile, engineString, countryStatistics
class WorldBankDataReader(object):
'''
classdocs
used factors: GNI, GDP, Employment, income, consumption, Nominal, and debt
metrics (symbol - meaning):
CPTOTNSXN - CPI Price, nominal
GC.DOD.TOTL.GD.ZS - Central government debt, total (percentage of GDP)
GC.DOD.TOTL.CN - Central government debt, total (current LCU)
GFDD.DM.10 - Gross portfolio debt liabilities to GDP (percentage)
GFDD.DM.07 - International debt issues to GDP (percentage)
NY.GNP.PCAP.PP.CD - GNI per capita, PPP (current international $)
NY.GNP.PCAP.CD - GNI per capita, Atlas method (current US$)
NY.GNP.PCAP.KD.ZG - GNI per capita growth (annual percentage)
NY.GDP.PCAP.CD - GDP per capita (current US$)
NY.GDP.PCAP.KD.ZG - GDP per capita growth (annual percentage)
NY.ADJ.NNTY.PC.CD - Adjusted net national income per capita (current US$)
NY.ADJ.NNTY.PC.KD.ZG - Adjusted net national income per capita (annual percentage growth)
SL.UEM.TOTL.ZS - Unemployment, total (percentage of total labor force) (modeled ILO estimate)
SL.UEM.TOTL.NE.ZS - Unemployment, total (percentage of total labor force) (national estimate)
'''
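    # Minimal sketch of the pattern used in setDataFrame below (illustrative
    # indicator code taken from the docstring above; the real mapping normally
    # comes from the configuration csv passed to the constructor):
    #     indicators = {'NY.GDP.PCAP.CD': 'gdp_per_capita'}
    #     df = wbdata.get_dataframe(indicators, country=['FIN'], convert_date=True)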
def __init__(self, fileName):
'''
Constructor
'''
try:
with open(fileName) as f:
self.configurationParams = dict(filter(None, csv.reader(f, delimiter=';')))
print (self.configurationParams)
except:
print ('Exception type:', sys.exc_info()[0])
print ('Exception value:', sys.exc_info()[1])
self.sql = sqla.create_engine(engineString)
def setDateFilter (self, dateFilter):
'''
dateFilter = (datetime.datetime(1995, 1, 1), datetime.datetime(2015, 1, 1))
'''
self.data_date = dateFilter
def setCountries(self, countries):
'''
countries = ["FI"]
'''
self.countries = countries
def getAllCountryCodes(self):
query = countryCodes
df_countries = pd.read_sql_query(query, self.sql)
countries = df_countries['iso_a3'].tolist()
print (countries)
return countries
def storeStatisticsToSQL(self, dfToStore, tableName = '[geographicNEStat]', reload = False ):
if reload:
conn = self.sql.connect()
trans = conn.begin()
conn.execute("truncate table [dbo]." + tableName )
trans.commit()
conn.close()
print ('Truncate done, reloading table:')
try:
dfToStore.to_sql(tableName , self.sql, if_exists='append')
except:
print ('Exception type:', sys.exc_info()[0])
print ('Exception value:', sys.exc_info()[1])
    def setDataFrame(self, countries = ["FIN"], data_date = (datetime.datetime(1995, 1, 1), datetime.datetime(2015, 1, 1))):
        '''
        set up the indicator - {'NY.GNP.PCAP.CD':'GNI per Capita'}
        df = df.dropna()
        '''
        self.setCountries(countries)
        self.setDateFilter(dateFilter = data_date)
        countries = self.getAllCountryCodes()
        indicators = self.configurationParams
        # grab the indicators above for the countries above and load them into a data frame
        for country in countries:
            df = wbdata.get_dataframe(indicators, country=self.countries, convert_date=True, data_date = self.data_date)
            df_describe = df.describe()
            df['country_code'] = country
            self.storeStatisticsToSQL(dfToStore = df, tableName = 'geographicNE_Stat')
def main():
'''
setDataFrame() - available parameters
countries = ["FI"] - List of countries
fileName = 'mainConf.csv' - config file for ID attributes from the World Bank
data_date = (datetime.datetime(1995, 1, 1), datetime.datetime(2015, 1, 1)) - Date filter to limit the records
get countrycodes to list
load dataframe to SQL database - use test case.
use date + countryId as key
'''
fileName = 'mainConf.csv'
reader = WorldBankDataReader(fileName = fileName)
reader.setDateFilter(dateFilter = (datetime.datetime(1995, 1, 1), datetime.datetime(2015, 1, 1)))
countries = reader.getAllCountryCodes()
reader.setCountries(countries = countries)
    reader.setDataFrame(countries = countries)
if __name__ == '__main__':
main()
|
apache-2.0
|
enderlabs/gavl
|
setup.py
|
1
|
1250
|
# Copyright 2017 by Teem, and other contributors,
# as noted in the individual source code files.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
setup(
name="gavl",
version="0.1.7",
description="Data definition language and aggregation layer",
packages=find_packages(),
include_package_data=True,
install_requires=[
'pyparsing',
'click',
'sqlalchemy',
'pandas',
],
entry_points='''
[console_scripts]
gavl=gavl.cli:main
''',
license='Apache-2.0',
classifiers=[
'License :: OSI Approved :: Apache Software License',
]
)
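# After installing (for development, e.g. ``pip install -e .``) the console
# script declared in entry_points above is available on the command line as
# ``gavl``.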
|
apache-2.0
|
xwolf12/scikit-learn
|
examples/applications/plot_out_of_core_classification.py
|
255
|
13919
|
"""
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the feature space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
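# A minimal, self-contained sketch (not part of the original example) of the
# hashing + partial_fit pattern described in the module docstring: texts are
# hashed into a fixed-size feature space, so an online classifier can be
# updated batch by batch without ever holding the full corpus in memory.
def _partial_fit_sketch():
    toy_texts = ["oil prices rise sharply", "company agrees to acquire rival"]
    toy_labels = np.array([0, 1])
    toy_vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                                       non_negative=True)
    toy_clf = SGDClassifier()
    toy_clf.partial_fit(toy_vectorizer.transform(toy_texts), toy_labels,
                        classes=np.array([0, 1]))
    return toy_clf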
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
|
bsd-3-clause
|
maxalbert/bokeh
|
setup.py
|
2
|
20252
|
"""Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
from shutil import copy
try:
import colorama
def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
except ImportError:
def bright(text): return text
def dim(text): return text
def white(text) : return text
def blue(text) : return text
def red(text) : return text
def green(text) : return text
def yellow(text) : return text
if 'nightly' in sys.argv:
from setuptools import setup
sys.argv.remove('nightly')
with open('__conda_version__.txt', 'r') as f:
version = f.read().rstrip()
vers_file = os.path.join('bokeh', '__conda_version__.py')
with open(vers_file, 'w') as f:
f.write("conda_version=" + "'" + version + "'")
else:
from distutils.core import setup
from distutils import dir_util
# Our own imports
import versioneer
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')
if sys.version_info[0] < 3:
input = raw_input
# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-' # dirname like 'myproject-1.2.0'
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
copy("LICENSE.txt", "bokeh/")
package_data = ['LICENSE.txt', 'themes/*.yaml']
def package_path(path, filters=()):
if not os.path.exists(path):
raise RuntimeError("packaging non-existent path: %s" % path)
elif os.path.isfile(path):
package_data.append(relpath(path, 'bokeh'))
else:
for path, dirs, files in os.walk(path):
path = relpath(path, 'bokeh')
for f in files:
if not filters or f.endswith(filters):
package_data.append(join(path, f))
# You can't install Bokeh in a virtualenv because of the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python)."""
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
prefixes = [sys.prefix, sys.exec_prefix]
sitepackages = []
seen = set()
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys.prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version_info[0] >= 3:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
sitepackages.append(os.path.abspath(sitedir))
sitepackages = [p for p in sitepackages if os.path.isdir(p)]
return sitepackages
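# Illustrative usage (assumption: a typical Linux CPython 2.7 install, not a guarantee):
#
#     site_packages = getsitepackages()[0]
#     # e.g. '/usr/lib/python2.7/site-packages'; only directories that exist are returned.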
def check_remove_bokeh_install(site_packages):
bokeh_path = join(site_packages, "bokeh")
if not (exists(bokeh_path) and isdir(bokeh_path)):
return
prompt = "Found existing bokeh install: %s\nRemove it? [y|N] " % bokeh_path
val = input(prompt)
if val == "y":
print("Removing old bokeh install...", end=" ")
try:
shutil.rmtree(bokeh_path)
print("Done")
except (IOError, OSError):
print("Unable to remove old bokeh at %s, exiting" % bokeh_path)
sys.exit(-1)
else:
print("Not removing old bokeh install")
sys.exit(1)
def remove_bokeh_pth(path_file):
if exists(path_file):
try:
os.remove(path_file)
except (IOError, OSError):
print("Unable to remove old path file at %s, exiting" % path_file)
sys.exit(-1)
return True
return False
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned the following
---- on stdout:
%s
---- on stderr:
%s
"""
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
print("Building BokehJS... ", end="")
sys.stdout.flush()
os.chdir('bokehjs')
if sys.platform != "win32":
cmd = [join('node_modules', '.bin', 'gulp'), 'build']
else:
cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']
t0 = time.time()
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
print(BUILD_EXEC_FAIL_MSG % (cmd, e))
sys.exit(1)
finally:
os.chdir('..')
result = proc.wait()
t1 = time.time()
if result != 0:
indented_msg = ""
outmsg = proc.stdout.read().decode('ascii', errors='ignore')
outmsg = "\n".join([" " + x for x in outmsg.split("\n")])
errmsg = proc.stderr.read().decode('ascii', errors='ignore')
errmsg = "\n".join([" " + x for x in errmsg.split("\n")])
print(BUILD_FAIL_MSG % (red(outmsg), red(errmsg)))
sys.exit(1)
indented_msg = ""
msg = proc.stdout.read().decode('ascii', errors='ignore')
pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
for line in msg.strip().split("\n"):
m = pat.match(line)
if not m: continue # skip generate.py output lines
stamp, txt = m.groups()
indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_SUCCESS_MSG % indented_msg)
print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
print()
print("Build artifact sizes:")
try:
def size(*path):
return os.stat(join("bokehjs", "build", *path)).st_size / 2**10
print(" - bokeh.js : %6.1f KB" % size("js", "bokeh.js"))
print(" - bokeh.css : %6.1f KB" % size("css", "bokeh.css"))
print(" - bokeh.min.js : %6.1f KB" % size("js", "bokeh.min.js"))
print(" - bokeh.min.css : %6.1f KB" % size("css", "bokeh.min.css"))
print(" - bokeh-widgets.js : %6.1f KB" % size("js", "bokeh-widgets.js"))
print(" - bokeh-widgets.css : %6.1f KB" % size("css", "bokeh-widgets.css"))
print(" - bokeh-widgets.min.js : %6.1f KB" % size("js", "bokeh-widgets.min.js"))
print(" - bokeh-widgets.min.css : %6.1f KB" % size("css", "bokeh-widgets.min.css"))
except Exception as e:
print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
target_jsdir = join(SERVER, 'static', 'js')
target_cssdir = join(SERVER, 'static', 'css')
STATIC_ASSETS = [
join(JS, 'bokeh.js'),
join(JS, 'bokeh.min.js'),
join(CSS, 'bokeh.css'),
join(CSS, 'bokeh.min.css'),
]
if not all([exists(a) for a in STATIC_ASSETS]):
print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
sys.exit(1)
if exists(target_jsdir):
shutil.rmtree(target_jsdir)
shutil.copytree(JS, target_jsdir)
if exists(target_cssdir):
shutil.rmtree(target_cssdir)
shutil.copytree(CSS, target_cssdir)
def clean():
print("Removing prior-built items...", end=" ")
build_dir = 'build/lib/bokeh'
if os.path.exists(build_dir):
dir_util.remove_tree(build_dir)
for root, dirs, files in os.walk('.'):
for item in files:
if item.endswith('.pyc'):
os.remove(os.path.join(root, item))
print("Done")
def get_user_jsargs():
print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
mapping = {"1": True, "2": False}
value = input("Choice? ")
while value not in mapping:
print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
value = input("Choice? ")
return mapping[value]
def parse_jsargs():
options = ('install', 'develop', 'sdist', 'egg_info', 'build')
installing = any(arg in sys.argv for arg in options)
if '--build_js' in sys.argv:
if not installing:
print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
sys.exit(1)
jsbuild = True
sys.argv.remove('--build_js')
elif '--install_js' in sys.argv:
# Note that --install_js can be used by itself (without sdist/install/develop)
jsbuild = False
sys.argv.remove('--install_js')
else:
if installing:
jsbuild = get_user_jsargs()
else:
jsbuild = False
return jsbuild
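# Typical command lines these helpers support (an illustrative sketch, not an
# exhaustive list; see parse_jsargs above for the exact flag handling):
#
#     python setup.py install --build_js     # build BokehJS, then install it
#     python setup.py develop --install_js   # reuse the previously built BokehJS
#     python setup.py --install_js           # only copy built JS/CSS into bokeh/server/static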
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Aliases for build_js and install_js
for i in range(len(sys.argv)):
if sys.argv[i] == '--build-js':
sys.argv[i] = '--build_js'
if sys.argv[i] == '--install-js':
sys.argv[i] = '--install_js'
# Set up this checkout or source archive with the right BokehJS files.
if sys.version_info[:2] < (2, 6):
raise RuntimeError("Bokeh requires python >= 2.6")
# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
install_js()
sys.exit(0)
# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
if "--install_js" in sys.argv:
print("Removing '--install_js' incompatible with 'sdist'")
sys.argv.remove('--install_js')
if "--build_js" not in sys.argv:
print("Adding '--build_js' required for 'sdist'")
sys.argv.append('--build_js')
# check for package install, set jsinstall to False to skip prompt
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
if "--build_js" in sys.argv or "--install_js" in sys.argv:
print("BokehJS source code is not shipped in sdist packages; "
"building/installing from the bokehjs source directory is disabled. "
"To build or develop BokehJS yourself, you must clone the full "
"Bokeh repository from https://github.com/bokeh/bokeh")
if "--build_js" in sys.argv:
sys.argv.remove('--build_js')
if "--install_js" in sys.argv:
sys.argv.remove('--install_js')
jsbuild = False
jsinstall = False
else:
jsbuild = parse_jsargs()
if jsbuild:
build_js()
if jsinstall:
install_js()
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics', '.geojson')
package_path(join(SERVER, 'static'))
package_path(join(ROOT, 'bokeh', '_templates'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)
if '--user' in sys.argv:
site_packages = site.USER_SITE
else:
site_packages = getsitepackages()[0]
path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))
print()
if 'develop' in sys.argv:
check_remove_bokeh_install(site_packages)
with open(path_file, "w+") as f:
f.write(path)
print("Installing Bokeh for development:")
print(" - writing path '%s' to %s" % (path, path_file))
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
sys.exit()
elif 'clean' in sys.argv:
clean()
elif 'install' in sys.argv:
pth_removed = remove_bokeh_pth(path_file)
print("Installing Bokeh:")
if pth_removed:
print(" - removed path file at %s" % path_file)
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
if jsinstall:
print("Bokeh-specific options available with 'install' or 'develop':")
print()
print(" --build_js build and install a fresh BokehJS")
print(" --install_js install only last previously built BokehJS")
else:
print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
print()
print()
REQUIRES = [
'six>=1.5.2',
'requests>=1.2.3',
'PyYAML>=3.10',
'python-dateutil>=2.1',
'Jinja2>=2.7',
'numpy>=1.7.1',
'pandas>=0.11.0',
'Flask>=0.10.1',
'pyzmq>=14.3.1',
'tornado>=4.0.1',
]
if sys.version_info[:2] == (2, 7):
REQUIRES.append('futures>=3.0.3')
_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()
# Horrible hack: workaround to allow creation of bdist_wheel on pip installation
# (pip forces generation of a wheel when installing a package, so register bdist_wheel when available)
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError as e:
# pip does not ask for bdist_wheel when the wheel package is not installed
bdist_wheel = None
if bdist_wheel is not None:
_cmdclass["bdist_wheel"] = bdist_wheel
setup(
name='bokeh',
version=_version,
cmdclass=_cmdclass,
packages=[
'bokeh',
'bokeh.application',
'bokeh.application.tests',
'bokeh.application.handlers',
'bokeh.application.handlers.tests',
'bokeh.models',
'bokeh.models.tests',
'bokeh.models.widgets',
'bokeh.charts',
'bokeh.charts.builders',
'bokeh.charts.builders.tests',
'bokeh.charts.tests',
'bokeh.client',
'bokeh.command',
'bokeh.command.tests',
'bokeh.command.subcommands',
'bokeh.command.subcommands.tests',
'bokeh.compat',
'bokeh.compat.mplexporter',
'bokeh.compat.mplexporter.renderers',
'bokeh.crossfilter',
'bokeh.sampledata',
'bokeh.server',
'bokeh.server.protocol',
'bokeh.server.protocol.messages',
'bokeh.server.protocol.messages.tests',
'bokeh.server.protocol.tests',
'bokeh.server.tests',
'bokeh.server.views',
'bokeh.sphinxext',
'bokeh.themes',
'bokeh.tests',
'bokeh.util',
'bokeh.util.tests',
'bokeh.validation',
],
package_data={'bokeh': package_data},
author='Continuum Analytics',
author_email='[email protected]',
url='http://github.com/bokeh/bokeh',
description='Statistical and novel interactive HTML plots for Python',
license='New BSD',
scripts=['bin/bokeh'],
zip_safe=False,
install_requires=REQUIRES
)
|
bsd-3-clause
|
AnasGhrab/scikit-learn
|
examples/plot_multioutput_face_completion.py
|
330
|
3019
|
"""
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
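# A possible extension (a sketch, not part of the original example): any other
# estimator that supports multi-output regression can be dropped into the
# ESTIMATORS dict above, e.g.
#
#     from sklearn.ensemble import RandomForestRegressor
#     ESTIMATORS["Random forest"] = RandomForestRegressor(n_estimators=10,
#                                                         random_state=0)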
|
bsd-3-clause
|
rahul-c1/scikit-learn
|
examples/missing_values.py
|
11
|
2679
|
"""
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
Script output:
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
|
bsd-3-clause
|
gnu-user/sf-crime-classification
|
src/knn_crime_original.py
|
1
|
1702
|
'''
Original file from kaggle
https://www.kaggle.com/wawanco/sf-crime/k-nearest-neighbour/files
'''
import pandas as pd
import numpy as np
import math
import zipfile
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
def llfun(act, pred):
""" Logloss function for 1/0 probability
"""
return (-(~(act == pred)).astype(int) * math.log(1e-15)).sum() / len(act)
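# Worked example (illustrative): llfun only penalizes mismatches, each at a cost
# of -log(1e-15) ~= 34.54, averaged over the series length. So for
#
#     act = pd.Series([0, 1, 1])
#     pred = pd.Series([0, 1, 0])
#
# one of three labels differs and llfun(act, pred) is roughly 34.54 / 3 ~= 11.51.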
z = zipfile.ZipFile('../input/train.csv.zip')
train = pd.read_csv(z.open('train.csv'), parse_dates=['Dates'])[['X', 'Y', 'Category']]
# Separate test and train sets out of the original train set.
msk = np.random.rand(len(train)) < 0.8
knn_train = train[msk]
knn_test = train[~msk]
n = len(knn_test)
print("Original size: %s" % len(train))
print("Train set: %s" % len(knn_train))
print("Test set: %s" % len(knn_test))
# Prepare data sets
x = knn_train[['X', 'Y']]
y = knn_train['Category'].astype('category')
actual = knn_test['Category'].astype('category')
# Fit
logloss = []
for i in range(1, 50, 1):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(x, y)
# Predict on test set
outcome = knn.predict(knn_test[['X', 'Y']])
# Logloss
logloss.append(llfun(actual, outcome))
plt.plot(logloss)
plt.savefig('n_neighbors_vs_logloss.png')
# Submit for K=40
z = zipfile.ZipFile('../input/test.csv.zip')
test = pd.read_csv(z.open('test.csv'), parse_dates=['Dates'])
x_test = test[['X', 'Y']]
knn = KNeighborsClassifier(n_neighbors=40)
knn.fit(x, y)
outcomes = knn.predict(x_test)
submit = pd.DataFrame({'Id': test.Id.tolist()})
for category in y.cat.categories:
submit[category] = np.where(outcomes == category, 1, 0)
submit.to_csv('k_nearest_neighbour.csv', index=False)
|
gpl-3.0
|
sketchytechky/zipline
|
zipline/modelling/engine.py
|
5
|
17723
|
"""
Compute Engine for FFC API
"""
from abc import (
ABCMeta,
abstractmethod,
)
from operator import and_
from six import (
iteritems,
itervalues,
with_metaclass,
)
from six.moves import (
reduce,
zip_longest,
)
from numpy import (
add,
empty_like,
)
from pandas import (
DataFrame,
date_range,
MultiIndex,
)
from zipline.lib.adjusted_array import ensure_ndarray
from zipline.errors import NoFurtherDataError
from zipline.utils.pandas_utils import explode
from .classifier import Classifier
from .factor import Factor
from .filter import Filter
from .graph import TermGraph
from .term import AssetExists
class FFCEngine(with_metaclass(ABCMeta)):
@abstractmethod
def factor_matrix(self, terms, start_date, end_date):
"""
Compute values for `terms` between `start_date` and `end_date`.
Returns a DataFrame with a MultiIndex of (date, asset) pairs on the
index. On each date, we return a row for each asset that passed all
instances of `Filter` in `terms`, and the columns of the returned frame
will be the keys in `terms` whose values are instances of `Factor`.
Parameters
----------
terms : dict[str -> zipline.modelling.term.Term]
Dict mapping term names to instances. The supplied names are used
as column names in our output frame.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
matrix : pd.DataFrame
A matrix of computed results.
"""
raise NotImplementedError("factor_matrix")
class NoOpFFCEngine(FFCEngine):
"""
FFCEngine that doesn't do anything.
"""
def factor_matrix(self, terms, start_date, end_date):
return DataFrame(
index=MultiIndex.from_product(
[date_range(start=start_date, end=end_date, freq='D'), ()],
),
columns=sorted(terms.keys())
)
class SimpleFFCEngine(object):
"""
FFC Engine class that computes each term independently.
Parameters
----------
loader : FFCLoader
A loader to use to retrieve raw data for atomic terms.
calendar : DatetimeIndex
Array of dates to consider as trading days when computing a range
between a fixed start and end.
asset_finder : zipline.assets.AssetFinder
An AssetFinder instance. We depend on the AssetFinder to determine
which assets are in the top-level universe at any point in time.
"""
__slots__ = [
'_loader',
'_calendar',
'_finder',
'_root_mask_term',
'__weakref__',
]
def __init__(self, loader, calendar, asset_finder):
self._loader = loader
self._calendar = calendar
self._finder = asset_finder
self._root_mask_term = AssetExists()
def factor_matrix(self, terms, start_date, end_date):
"""
Compute a factor matrix.
Parameters
----------
terms : dict[str -> zipline.modelling.term.Term]
Dict mapping term names to instances. The supplied names are used
as column names in our output frame.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
The algorithm implemented here can be broken down into the following
stages:
0. Build a dependency graph of all terms in `terms`. Topologically
sort the graph to determine an order in which we can compute the terms.
1. Ask our AssetFinder for a "lifetimes matrix", which should contain,
for each date between start_date and end_date, a boolean value for each
known asset indicating whether the asset existed on that date.
2. Compute each term in the dependency order determined in (0), caching
the results in a dictionary so that they can be fed into future
terms.
3. For each date, determine the number of assets passing **all**
filters. The sum, N, of all these values is the total number of rows in
our output frame, so we pre-allocate an output array of length N for
each factor in `terms`.
4. Fill in the arrays allocated in (3) by copying computed values from
our output cache into the corresponding rows.
5. Stick the values computed in (4) into a DataFrame and return it.
Step 0 is performed by `zipline.modelling.graph.TermGraph`.
Step 1 is performed in `self._compute_root_mask`.
Step 2 is performed in `self.compute_chunk`.
Steps 3, 4, and 5 are performed in `self._format_factor_matrix`.
See Also
--------
FFCEngine.factor_matrix
"""
if end_date <= start_date:
raise ValueError(
"start_date must be before end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
graph = TermGraph(terms)
extra_rows = graph.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(start_date, end_date, extra_rows)
dates, assets, root_mask_values = explode(root_mask)
raw_outputs = self.compute_chunk(
graph,
dates,
assets,
initial_workspace={self._root_mask_term: root_mask_values},
)
# Collect the results that we'll actually show to the user.
filters, factors = {}, {}
for name, term in iteritems(terms):
if isinstance(term, Filter):
filters[name] = raw_outputs[name]
elif isinstance(term, Factor):
factors[name] = raw_outputs[name]
elif isinstance(term, Classifier):
continue
else:
raise ValueError("Unknown term type: %s" % term)
# Add the root mask as an implicit filter, truncating off the extra
# rows that we only needed to compute other terms.
filters['base'] = root_mask_values[extra_rows:]
out_dates = dates[extra_rows:]
return self._format_factor_matrix(out_dates, assets, filters, factors)
def _compute_root_mask(self, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of extra rows to compute before `start_date`.
Extra rows are needed by terms like moving averages that require a
trailing window of data.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
calendar = self._calendar
finder = self._finder
start_idx, end_idx = self._calendar.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError(
msg="Insufficient data to compute FFC Matrix: "
"start date was %s, "
"earliest known date was %s, "
"and %d extra rows were requested." % (
start_date, calendar[0], extra_rows,
),
)
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
lifetimes = finder.lifetimes(
calendar[start_idx - extra_rows:end_idx],
include_start_date=False
)
assert lifetimes.index[extra_rows] == start_date
assert lifetimes.index[-1] == end_date
if not lifetimes.columns.is_unique:
columns = lifetimes.columns
duplicated = columns[columns.duplicated()].unique()
raise AssertionError("Duplicated sids: %d" % duplicated)
# Filter out columns that didn't exist between the requested start and
# end dates.
existed = lifetimes.iloc[extra_rows:].any()
return lifetimes.loc[:, existed]
def _mask_and_dates_for_term(self, term, workspace, graph, dates):
"""
Load mask and mask row labels for term.
"""
mask = term.mask
offset = graph.extra_rows[mask] - graph.extra_rows[term]
return workspace[mask][offset:], dates[offset:]
def _inputs_for_term(self, term, workspace, graph):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store as
many rows as will be necessary to serve **any** computation requiring
that input.
"""
offsets = graph.offset
if term.windowed:
# If term is windowed, then all input data should be instances of
# AdjustedArray.
return [
workspace[input_].traverse(
window_length=term.window_length,
offset=offsets[term, input_]
)
for input_ in term.inputs
]
# If term is not windowed, input_data may be an AdjustedArray or
# np.ndarray. Coerce the former to the latter.
out = []
for input_ in term.inputs:
input_data = ensure_ndarray(workspace[input_])
offset = offsets[term, input_]
# OPTIMIZATION: Don't make a copy by doing input_data[0:] if
# offset is zero.
if offset:
input_data = input_data[offset:]
out.append(input_data)
return out
def compute_chunk(self, graph, dates, assets, initial_workspace):
"""
Compute the FFC terms in the graph for the requested start and end
dates.
Parameters
----------
graph : zipline.modelling.graph.TermGraph
dates : pd.DatetimeIndex
Row labels for our root mask.
assets : pd.Int64Index
Column labels for our root mask.
initial_workspace : dict
Map from term -> output.
Must contain at least entry for `self._root_mask_term` whose shape
is `(len(dates), len(assets))`, but may contain additional
pre-computed terms for testing or optimization purposes.
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
self._validate_compute_chunk_params(dates, assets, initial_workspace)
loader = self._loader
# Copy the supplied initial workspace so we don't mutate it in place.
workspace = initial_workspace.copy()
for term in graph.ordered():
# `term` may have been supplied in `initial_workspace`, and in the
# future we may pre-compute atomic terms coming from the same
# dataset. In either case, we will already have an entry for this
# term, which we shouldn't re-compute.
if term in workspace:
continue
# Asset labels are always the same, but date labels vary by how
# many extra rows are needed.
mask, mask_dates = self._mask_and_dates_for_term(
term, workspace, graph, dates
)
if term.atomic:
# FUTURE OPTIMIZATION: Scan the resolution order for terms in
# the same dataset and load them here as well.
to_load = [term]
loaded = loader.load_adjusted_array(
to_load, mask_dates, assets, mask,
)
assert len(to_load) == len(loaded)
for loaded_term, adj_array in zip_longest(to_load, loaded):
workspace[loaded_term] = adj_array
else:
workspace[term] = term._compute(
self._inputs_for_term(term, workspace, graph),
mask_dates,
assets,
mask,
)
assert(workspace[term].shape == mask.shape)
out = {}
graph_extra_rows = graph.extra_rows
for name, term in iteritems(graph.outputs):
# Truncate off extra rows from outputs.
out[name] = workspace[term][graph_extra_rows[term]:]
return out
def _format_factor_matrix(self, dates, assets, filters, factors):
"""
Convert raw computed filters/factors into a DataFrame for public APIs.
Parameters
----------
dates : np.array[datetime64]
Row index for arrays in `filters` and `factors.`
assets : np.array[int64]
Column index for arrays in `filters` and `factors.`
filters : dict
Dict mapping filter names -> computed filters.
factors : dict
Dict mapping factor names -> computed factors.
Returns
-------
factor_matrix : pd.DataFrame
The indices of `factor_matrix` are as follows:
index : two-tiered MultiIndex of (date, asset).
For each date, we return a row for each asset that passed all
filters on that date.
columns : keys from `factors`
Each date/asset/factor triple contains the computed value of the given
factor on the given date for the given asset.
"""
# FUTURE OPTIMIZATION: Cythonize all of this.
# Boolean mask of values that passed all filters.
unioned = reduce(and_, itervalues(filters))
# Parallel arrays of (x,y) coords for (date, asset) pairs that passed
# all filters. Each entry here will correspond to a row in our output
# frame.
nonzero_xs, nonzero_ys = unioned.nonzero()
# Raw arrays storing (date, asset) pairs.
# These will form the index of our output frame.
raw_dates_index = empty_like(nonzero_xs, dtype='datetime64[ns]')
raw_assets_index = empty_like(nonzero_xs, dtype=int)
# Mapping from column_name -> array.
# This will be the `data` arg to our output frame.
columns = {
name: empty_like(nonzero_xs, dtype=factor.dtype)
for name, factor in iteritems(factors)
}
# We're going to iterate over `iteritems(columns)` a whole bunch of
# times down below. It's faster to construct and iterate over a tuple of
# pairs.
columns_iter = tuple(iteritems(columns))
# This is tricky.
# unioned.sum(axis=1) gives us an array of the same size as `dates`
# containing, for each date, the number of assets that passed our
# filters on that date.
# Running this through add.accumulate gives us an array containing, for
# each date, the running total of the number of assets that passed our
# filters on or before that date.
# This means that (bounds[i - 1], bounds[i]) gives us the indices of
# the first and last rows in our output frame for each date in `dates`.
bounds = add.accumulate(unioned.sum(axis=1))
day_start = 0
for day_idx, day_end in enumerate(bounds):
day_bounds = slice(day_start, day_end)
column_indices = nonzero_ys[day_bounds]
raw_dates_index[day_bounds] = dates[day_idx]
raw_assets_index[day_bounds] = assets[column_indices]
for name, colarray in columns_iter:
colarray[day_bounds] = factors[name][day_idx, column_indices]
# Upper bound of current row becomes lower bound for next row.
day_start = day_end
return DataFrame(
data=columns,
index=MultiIndex.from_arrays(
[
raw_dates_index,
# FUTURE OPTIMIZATION:
# Avoid duplicate lookups by grouping and only looking up
# each unique sid once.
self._finder.retrieve_all(raw_assets_index),
],
)
).tz_localize('UTC', level=0)
def _validate_compute_chunk_params(self, dates, assets, initial_workspace):
"""
Verify that the values passed to compute_chunk are well-formed.
"""
root = self._root_mask_term
clsname = type(self).__name__
# Writing this out explicitly so this errors in testing if we change
# the name without updating this line.
compute_chunk_name = self.compute_chunk.__name__
if root not in initial_workspace:
raise AssertionError(
"root_mask values not supplied to {cls}.{method}".format(
cls=clsname,
method=compute_chunk_name,
)
)
shape = initial_workspace[root].shape
implied_shape = len(dates), len(assets)
if shape != implied_shape:
raise AssertionError(
"root_mask shape is {shape}, but received dates/assets "
"imply that shape should be {implied}".format(
shape=shape,
implied=implied_shape,
)
)
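# Illustrative wiring of the engine above (a sketch; `my_loader`, `trading_days`,
# `finder`, and the term instances are assumptions, not names defined in this module):
#
#     engine = SimpleFFCEngine(my_loader, trading_days, finder)
#     matrix = engine.factor_matrix(
#         {'sma': my_sma_factor, 'tradable': my_filter},
#         start_date=pd.Timestamp('2014-01-02', tz='UTC'),
#         end_date=pd.Timestamp('2014-06-30', tz='UTC'),
#     )
#     # `matrix` is a DataFrame indexed by (date, asset) with one column per Factor.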
|
apache-2.0
|
creasyw/IMTAphy
|
modules/phy/imtaphy/testConfigs/plotChannel.py
|
1
|
4149
|
#!/usr/bin/env python
import numpy
import matplotlib.colors
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import sys
# defines a custom colormap that has a different color for the 0 entry
# which basically is the color for non-existing values.
# it is the first line for each red/green/blue and can be set to black (0,0,0)
# or white (0,1,1)
# http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps
# based on original jet color map from matplotlib's _cm.py
cdict = {'red': ((0.0, 1.0, 1.0),
(0., 0, 0),
(0.35, 0, 0),
(0.66, 1, 1),
(0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0.0, 1.0, 1.0),
(0., 0, 0),
(0.125,0, 0),
(0.375,1, 1),
(0.64,1, 1),
(0.91,0,0),
(1, 0, 0)),
'blue': ((0.0, 1.0, 1.0),
(0., 0.5, 0.5),
(0.11, 1, 1),
(0.34, 1, 1),
(0.65,0, 0),
(1, 0, 0))}
cdict = {'red': (
(0., 0, 0),
(0.35, 0, 0),
(0.66, 1, 1),
(0.89,1, 1),
(1, 0.5, 0.5)),
'green': (
(0., 0, 0),
(0.125,0, 0),
(0.375,1, 1),
(0.64,1, 1),
(0.91,0,0),
(1, 0, 0)),
'blue': (
(0., 0.5, 0.5),
(0.11, 1, 1),
(0.34, 1, 1),
(0.65,0, 0),
(1, 0, 0))}
try:
fileToPlot = sys.argv[1]
except:
print "Usage: plotChannel.py filename"
print "E.g.: plotChannel.py output/channelGain_UE10_antennaPair1_max.m"
sys.exit()
minValue = -30
maxValue = 30
fillValue = minValue - 1
baseFilename = '_'.join(fileToPlot.split("_")[:-1])
what= '_' + fileToPlot.split("_")[-1]
try:
filename = baseFilename + what
trialFilename = baseFilename + '_trials.m'
print "Loading %s" % filename
map_raw = numpy.loadtxt(filename, comments='%')
print "Loading %s" % trialFilename
trials_raw = numpy.loadtxt(trialFilename, comments='%')
except IOError:
print "IOError"
sys.exit()
map_parsed = numpy.rec.fromrecords(map_raw, names = 'x,y,z')
trials_parsed = numpy.rec.fromrecords(trials_raw, names = 'x,y,z')
numXEntries = len(numpy.unique(map_parsed['x']))
minX = min(map_parsed['x'])
maxX = max(map_parsed['x'])
numYEntries = len(numpy.unique(map_parsed['y']))
minY = min(map_parsed['y'])
maxY = max(map_parsed['y'])
print "The map is (%dx%d)" % (numYEntries+1, numXEntries+1)
map = numpy.ones((numYEntries, numXEntries)) * fillValue
axes = matplotlib.pyplot.axes()
axes.xaxis.set_label_text("TTIs")
axes.yaxis.set_label_text("PRBs")
i = 0
for xx in xrange(numXEntries):
for yy in xrange(numYEntries):
if trials_parsed['z'][i] > 0:
# clip to the max/min value if the limits are set to something other than +/-inf
if minValue != float("-inf"):
map_parsed['z'][i] = max(map_parsed['z'][i], minValue)
if maxValue != float("+inf"):
map_parsed['z'][i] = min(map_parsed['z'][i], maxValue)
map[yy][xx] = map_parsed['z'][i]
else:
map[yy][xx] = fillValue #minValue - 0.0001
i += 1
my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)
im = axes.imshow(map, cmap = my_cmap,
origin = 'lower',
interpolation = "nearest"
)
cbar = matplotlib.pyplot.colorbar(im, ax=axes)
cbar.set_label("Fast-fading gain [dB]")
matplotlib.pyplot.savefig(filename + ".pdf",
dpi = 300,
transparent = True,
bbox_inches = "tight",
pad_inches = 0.02)
#plt.show()
|
gpl-2.0
|
Adai0808/scikit-learn
|
examples/mixture/plot_gmm.py
|
248
|
2817
|
"""
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
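# A possible variation (an assumption about this scikit-learn version's API, shown
# only as a sketch): the concentration of the Dirichlet Process prior can be tuned
# via the ``alpha`` argument, e.g.
#
#     mixture.DPGMM(n_components=5, covariance_type='full', alpha=10.)
#
# which makes the model more willing to use additional components.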
|
bsd-3-clause
|
Cisco-Talos/fnc-1
|
tree_model/xgb_train.py
|
1
|
5035
|
#!/usr/bin/env python
import sys
import cPickle
import numpy as np
import pandas as pd  # used directly in build_data(); don't rely on the star imports for it
from itertools import chain
from sklearn.model_selection import StratifiedKFold
import xgboost as xgb
from collections import Counter
from CountFeatureGenerator import *
from TfidfFeatureGenerator import *
from SvdFeatureGenerator import *
from Word2VecFeatureGenerator import *
from SentimentFeatureGenerator import *
params_xgb = {
'max_depth': 6,
'colsample_bytree': 0.6,
'subsample': 1.0,
'eta': 0.1,
'silent': 1,
'objective': 'multi:softmax',
'eval_metric':'mlogloss',
'num_class': 4
}
num_round = 1000
def build_data():
# create target variable
body = pd.read_csv("train_bodies.csv")
stances = pd.read_csv("train_stances.csv")
data = pd.merge(body, stances, how='right', on='Body ID')
targets = ['agree', 'disagree', 'discuss', 'unrelated']
targets_dict = dict(zip(targets, range(len(targets))))
data['target'] = map(lambda x: targets_dict[x], data['Stance'])
data_y = data['target'].values
# read features
generators = [
CountFeatureGenerator(),
TfidfFeatureGenerator(),
SvdFeatureGenerator(),
Word2VecFeatureGenerator(),
SentimentFeatureGenerator()
]
features = [f for g in generators for f in g.read()]
data_x = np.hstack(features)
print 'data_x.shape'
print data_x.shape
print 'data_y.shape'
print data_y.shape
return data_x, data_y
def fscore(pred_y, truth_y):
# targets = ['agree', 'disagree', 'discuss', 'unrelated']
# y = [0, 1, 2, 3]
score = 0
if pred_y.shape != truth_y.shape:
raise Exception('pred_y and truth have different shapes')
for i in range(pred_y.shape[0]):
if truth_y[i] == 3:
if pred_y[i] == 3: score += 0.25
else:
if pred_y[i] != 3: score += 0.25
if truth_y[i] == pred_y[i]: score += 0.75
return score
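# Worked example (illustrative) of the weighted scoring above, with
# truth_y = np.array([3, 0, 0]) and pred_y = np.array([3, 0, 2]):
#   i=0: both 'unrelated' (label 3)        -> +0.25
#   i=1: related, stance label matches     -> +0.25 +0.75
#   i=2: related, but wrong stance label   -> +0.25
# so fscore(pred_y, truth_y) == 1.5, against a perfect_score of 2.25.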
def perfect_score(truth_y):
score = 0
for i in range(truth_y.shape[0]):
if truth_y[i] == 3: score += 0.25
else: score += 1
#else: score += 0.75
return score
def cv():
data_x, data_y = build_data()
random_seed = 2017
#skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=random_seed)
#with open('skf.pkl', 'wb') as outfile:
# cPickle.dump(skf, outfile, -1)
# print 'skf saved'
scores = []
best_iters = [0]*5
pscores = []
with open('skf.pkl', 'rb') as infile:
skf = cPickle.load(infile)
for fold, (trainInd, validInd) in enumerate(skf.split(data_x, data_y)):
print 'fold %s' % fold
x_train = data_x[trainInd]
y_train = data_y[trainInd]
x_valid = data_x[validInd]
y_valid = data_y[validInd]
print 'perfect_score: ', perfect_score(y_valid)
print Counter(y_valid)
#break
dtrain = xgb.DMatrix(x_train, label=y_train)
dvalid = xgb.DMatrix(x_valid, label=y_valid)
watchlist = [(dtrain, 'train'), (dvalid, 'eval')]
bst = xgb.train(params_xgb,
dtrain,
num_round,
watchlist,
verbose_eval=100)
#early_stopping_rounds=30)
#pred_y = bst.predict(dvalid, ntree_limit=bst.best_ntree_limit)
#print 'best iterations: ', bst.best_ntree_limit
pred_y = bst.predict(dvalid)
print pred_y
print Counter(pred_y)
#pred_y = np.argmax(bst.predict(dvalid, ntree_limit=bst.best_ntree_limit), axis=1)
print 'pred_y.shape'
print pred_y.shape
print 'y_valid.shape'
print y_valid.shape
s = fscore(pred_y, y_valid)
s_perf = perfect_score(y_valid)
print 'fold %s, score = %d, perfect_score %d' % (fold, s, s_perf)
scores.append(s)
pscores.append(s_perf)
#break
print 'scores:'
print scores
print 'mean score:'
print np.mean(scores)
print 'perfect scores:'
print pscores
print 'mean perfect score:'
print np.mean(pscores)
if __name__ == '__main__':
#build_data()
cv()
# Copyright 2017 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
apache-2.0
|
Akshay0724/scikit-learn
|
examples/svm/plot_oneclass.py
|
80
|
2338
|
"""
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='darkred')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white', s=s)
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='blueviolet', s=s)
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='gold', s=s)
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
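# A quick sanity check (a sketch appended to the example above): with nu=0.1 the
# one-class SVM bounds the fraction of training errors from above by roughly 10%, so
#
#     print("train error fraction: %.2f" % (n_error_train / float(len(X_train))))
#
# should print a value close to or below 0.10.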
|
bsd-3-clause
|
untom/scikit-learn
|
sklearn/learning_curve.py
|
110
|
13467
|
"""Utilities to evaluate models with respect to a variable
"""
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import is_classifier, clone
from .cross_validation import check_cv
from .externals.joblib import Parallel, delayed
from .cross_validation import _safe_split, _score, _fit_and_score
from .metrics.scorer import check_scoring
from .utils import indexable
from .utils.fixes import astype
__all__ = ['learning_curve', 'validation_curve']
def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5),
cv=None, scoring=None, exploit_incremental_learning=False,
n_jobs=1, pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curves>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y = indexable(X, y)
# Make a list since we will be iterating multiple times over the folds
cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator)))
scorer = check_scoring(estimator, scoring=scoring)
# HACK as long as boolean indices are allowed in cv generators
if cv[0][0].dtype == bool:
new_cv = []
for i in range(len(cv)):
new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0]))
cv = new_cv
n_max_training_samples = len(cv[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv)
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
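# Illustrative usage (a sketch; the iris data and SVC choice are assumptions, not
# part of this module):
#
#     from sklearn.datasets import load_iris
#     from sklearn.svm import SVC
#     iris = load_iris()
#     sizes, train_scores, test_scores = learning_curve(
#         SVC(kernel="linear"), iris.data, iris.target, cv=5)
#     # sizes has shape (n_unique_ticks,); both score arrays are (n_ticks, n_cv_folds).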
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
"of ticks will be less than than the size of "
"'train_sizes' %d instead of %d)."
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, cv=None,
scoring=None, n_jobs=1, pre_dispatch="all", verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <validation_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
cv : integer, cross-validation generator, optional
If an integer is passed, it is the number of folds (defaults to 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
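# Illustrative usage (a sketch mirroring the plot_validation_curve.py example
# referenced above; the dataset and parameter grid are assumptions):
#
#     from sklearn.datasets import load_digits
#     from sklearn.svm import SVC
#     digits = load_digits()
#     train_scores, test_scores = validation_curve(
#         SVC(), digits.data, digits.target,
#         param_name="gamma", param_range=np.logspace(-6, -1, 5), cv=5)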
|
bsd-3-clause
|
equialgo/scikit-learn
|
examples/cluster/plot_kmeans_silhouette_analysis.py
|
83
|
5888
|
"""
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
The thickness of the silhouette plot also gives a visual indication of cluster
size. When ``n_clusters`` is equal to 2, the silhouette plot for cluster 0 is
bigger because the 3 sub-clusters are grouped into one big cluster. However,
when ``n_clusters`` is equal to 4, all the plots are more or less of similar
thickness and hence of similar sizes, as can also be verified from the
labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
|
bsd-3-clause
|
Nyker510/scikit-learn
|
sklearn/externals/joblib/__init__.py
|
36
|
4795
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is often rerun over and
  over, for instance when prototyping computationally heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b2'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
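# --- Illustrative sketch (not part of the original package) -----------------
# A hedged, combined example of ``Memory`` and ``Parallel``; the cache
# directory and inputs are made up for illustration.  Call it explicitly,
# e.g. ``from sklearn.externals.joblib import _demo_cache_and_parallel``.
def _demo_cache_and_parallel(cachedir='/tmp/joblib_demo'):
    import numpy as np
    mem = Memory(cachedir=cachedir, verbose=0)
    cached_square = mem.cache(np.square)
    arrays = [np.arange(n + 1) for n in range(3)]
    # Map the cached function over the inputs; results are memoized on disk.
    return Parallel(n_jobs=1)(delayed(cached_square)(a) for a in arrays)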
|
bsd-3-clause
|
BrooksbridgeCapitalLLP/returnsseries
|
returnsseries/plot.py
|
2
|
4876
|
"""Plotting funcs for ReturnsSeries class"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import returnsseries.utils as ru
import returnsseries.displayfunctions as rd
def plot_perf(returns_list, log2, shade_dates=None, shade_color='lightblue',
yticks_round=1, legend_loc='lower right',
summary_funcs=rd.summaries['ts'], **kwargs):
"""Plot a list of ReturnsSeries with relevant summary stats
Parameters
----------
returns_list: list
list of ReturnsSeries
log2: bool
Passed to ReturnsSeries.account_curve. If False result will be
price-index of compounding returns. If True result will be base-2
logarithm (numpy.log2) of price-index and will reset the y-axis
label and y-axis tick labels to reflect the log scale
shade_dates: list, optional, default None
List of tuples, each tuple of length 2, each tuple contains start
date and end date of time periods to shade with color
shade_color: str, optional, default 'lightblue'
String specifying the color to use for shade_dates. Accepts any
valid matplotlib color name/string
yticks_round: int, optional, default 1
        Number of decimals the y-axis tick labels should be rounded to
legend_loc: str, optional, default 'lower right'
Specifies where to place pyplot.legend, accepts any string that is
valid for pyplot.legend loc arg
summary_funcs: list, optional,
default returnsseries.displayfunctions.summaries['ts']
list of functions passed into ReturnsSeries.summary
kwargs: keywords
Any keyword arguments to pass to matplotlib.pyplot.plot
Returns
-------
None"""
for rtns in returns_list:
rtns.plot_line(log2, shade_dates, **kwargs)
if log2:
yticks_log2(yticks_round)
summary_df = pd.concat([rtns.summary(summary_funcs) \
for rtns in returns_list], axis=1)
text_topleft(summary_df)
if legend_loc is not None:
plt.legend(loc=legend_loc)
return None
def correl_calc(returns_list, base_series):
"""Calculate correlation between all series in returns_list and base_series
Parameters
----------
returns_list: list
list of pandas.Series
base_series: int
Specifies which entry in returns_list to calculate the
correlation with. must have 0 <= base_series < len(returns_list)
Returns
-------
pandas.Series
index are the pandas.Series.name from the entries in returns_list,
        values are the correlations between each series and the series in
returns_list[base_series]
"""
correlations = pd.concat(returns_list, axis=1).corr()
correlations = correlations.iloc[:,base_series]
correlations = correlations.round(2)
name = correlations.index[base_series]
correlations.name = "Correlation with {}".format(name)
return correlations
def shade_dates(shade_dates, srs, color):
"""Color in area below srs between index values in shade_dates
Note
----
Operates on active plotting figure.
Parameters
----------
shade_dates: list
list of tuples, each tuple contains 2 entries, entries define the
start and end of a subperiod within srs.index to be shaded in
srs: pandas.Series
values define the y-values to color beneath
color: str
Name of the color to use, can be any valid matplotlib color name
Returns
-------
None"""
maxs = ru.within_dates(srs, shade_dates, np.nan)
mins = maxs.copy()
ylim_min = min(plt.ylim())
mins[ np.invert(mins.isnull()) ] = ylim_min
plt.fill_between(mins.index, mins.values, maxs.values, color=color)
return None
def yticks_log2(round_=1):
"""Relabel y-axis for log2 plot
Note
----
Operates on active plotting figure.
Parameters
----------
round_: int, optional, default 1
Number of digits to round y-axis tick labels to, passed to numpy.round
Returns
-------
None"""
y_tick_locs, y_tick_labels = plt.yticks()
new_labels = np.round(pow(2, y_tick_locs), round_)
plt.yticks(y_tick_locs, new_labels)
plt.ylabel('Logarithmic Return Scale')
return None
def text_topleft(str_):
"""Write a text in the top-left corner of active plotting figure
Parameters
----------
str_: str
Text string to write
Returns
-------
None"""
xlims = plt.xlim()
xdiff = max(xlims) - min(xlims)
text_x = min(xlims) + xdiff * .01
text_y = max(plt.ylim()) * .99
plt.text(text_x, text_y, str_, horizontalalignment='left',
verticalalignment='top', family='monospace')
return None
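# --- Illustrative sketch (not part of the original module) ------------------
# ``correl_calc`` only needs plain pandas Series, so a minimal, hedged demo
# can live behind a ``__main__`` guard; the series names and values below are
# made up purely for illustration.
if __name__ == "__main__":
    a = pd.Series([0.01, -0.02, 0.03, 0.01], name='A')
    b = pd.Series([0.02, -0.01, 0.02, 0.00], name='B')
    # Correlation of every series with the first one ('A'), rounded to 2 dp.
    print(correl_calc([a, b], base_series=0))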
|
gpl-3.0
|
biocore/qiime
|
tests/test_plot_taxa_summary.py
|
15
|
16573
|
#!/usr/bin/env python
# file test_plot_taxa_summary.py
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME Project" # consider project name
__credits__ = ["Jesse Stombaugh", "Julia Goodrich"] # remember to add yourself
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"
import matplotlib
from matplotlib import use
use('Agg', warn=False)
from numpy import array
from os.path import exists
from StringIO import StringIO
from unittest import TestCase, main
from os import remove, mkdir, removedirs, listdir
from qiime.plot_taxa_summary import (make_pie_chart, make_img_name,
get_counts, write_html_file,
make_HTML_table, get_fracs, make_all_charts,
make_area_bar_chart, make_legend, DATA_HTML)
class TopLevelTests(TestCase):
"""Tests of top-level functions"""
def setUp(self):
"""define some top-level data"""
self.props = {"title": "Class: from 3 categories"}
self.prefs = {'1': {'column': '1'}}
self.counts1 = [(
1, "a;b;c", "a<br>b<br>c"), (3, "d;e;f", "d<br>e<br>f"),
(4, "a;g;h", "a<br>g<br>h"), (2, "d;e;i", "d<br>e<br>i")]
self.sample_ids = ['14FC041', '14FC042', '14FC043', '14FC044']
self.taxa = ["a;b;c", "d;e;i", "d;e;f", "a;g;h"]
self.lines_parsed = (['14FC041', '14FC042', '14FC043', '14FC044'],
['a;b;c', 'd;e;f', 'a;g;h', "d;e;i"],
[['0.1', '0.3', '0.2'], ['0', '0.2', '0.1'],
['0.4', '0', '0.3'], ['0.5', '0', '0.1']])
self.fracs = [("a;b;c", 1.0 / 10), ("d;e;f", 3.0 / 10),
("a;g;h", 4.0 / 10), ("d;e;i", 2.0 / 10)]
self.colors = ['#0000ff', '#00ff00', '#ff0000', '#00ffff']
self.area_fracs = [[0.1, 0.3, 0.2], [0.0, 0.2, 0.1],
[0.4, 0.0, 0.3], [0.5, 0.0, 0.1]]
self.color_prefs = {
"a;b;c": 'blue1', "d;e;i": 'red1', "d;e;f": 'blue2',
"a;g;h": 'red2'}
self.dpi = 80
self.plot_width = 12
self.plot_height = 6
self.bar_width = 1
self.generate_image_type = 'pdf'
self._paths_to_clean_up = []
self._dirs_to_clean_up = []
self.dir_path = "/tmp/qiimewebfiles/"
# make the webfile directory
try:
mkdir(self.dir_path)
except OSError:
pass
# make the charts directory
try:
mkdir("/tmp/qiimewebfiles/charts")
except OSError:
pass
# define directory to clean up
self._dirs_to_clean_up = ["/tmp/qiimewebfiles/charts"]
def tearDown(self):
map(remove, self._paths_to_clean_up)
map(removedirs, self._dirs_to_clean_up)
def test_make_legend(self):
"""make_legend create a legend image given an array of ids and
colors"""
fpath = '/tmp/qiimewebfiles/area.pdf'
filename1 = '/tmp/qiimewebfiles/area_legend.pdf'
obs = make_legend(self.sample_ids, self.colors, self.plot_width,
self.plot_height, 'black', 'white', fpath,
self.generate_image_type, self.dpi)
        self.assertTrue(exists(filename1), 'The legend pdf file was not created in \
the appropriate location')
self._paths_to_clean_up = [filename1]
def test_get_counts(self):
"""get_counts should gets all the counts for an input file"""
# test the pie charts
img_data = get_counts("Phylum", ['14FC041', '14FC042', '14FC043'], 5,
"/tmp/qiimewebfiles/", 1, self.lines_parsed,
self.prefs, self.color_prefs, 'black', 'white',
'pie', self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi,
'file.txt', 0, 'categorical', False)
self.assertEqual(len(img_data), 8)
# test the area charts
img_data = get_counts("Phylum", ['14FC041', '14FC042', '14FC043'], 5,
"/tmp/qiimewebfiles/", 1, self.lines_parsed,
self.prefs, self.color_prefs, 'black', 'white',
'area', self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi,
'file.txt', 0, 'categorical', False)
self.assertEqual(len(img_data), 2)
# test the area charts
img_data = get_counts("Phylum", ['14FC041', '14FC042', '14FC043'], 5,
"/tmp/qiimewebfiles/", 1, self.lines_parsed,
self.prefs, self.color_prefs, 'black', 'white',
'bar', self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi,
'file.txt', 0, 'categorical', False)
self.assertEqual(len(img_data), 2)
# clean up files generated
self._paths_to_clean_up = ["/tmp/qiimewebfiles/charts/" + f
for f in listdir("/tmp/qiimewebfiles/charts")]
def test_get_fracs(self):
""""get_fracs should Return fractions for matplotlib chart"""
# test the pie charts
exp_all_counts = [DATA_HTML % (
(4.0 / 10) * 100.0, 'a<br>g', 'h', 'h', "a;g;h"),
DATA_HTML % (
(3.0 / 10) * 100,
'd<br>e',
'f',
'f',
"d;e;f"),
DATA_HTML % (
(2.0 / 10) * 100,
'd<br>e',
'i',
'i',
"d;e;i"),
DATA_HTML % ((1.0 / 10) * 100, 'a<br>b', 'c', 'c', "a;b;c")]
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 5, 10, 'pie')
self.assertEqual(
fracs_labels_other, [("a;b;c", 1.0 / 10), ("a;g;h", 4.0 / 10),
("d;e;f", 3.0 / 10), ("d;e;i", 2.0 / 10)])
self.assertEqual(fracs_labels, [])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 0)
self.assertEqual(red, 10)
self.assertEqual(other_frac, 0)
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 3, 10, 'pie')
self.assertEqual(
fracs_labels_other, [("a;g;h", 4.0 / 10), ("d;e;f", 3.0 / 10)])
self.assertEqual(
fracs_labels, [("a;g;h", 4.0 / 7), ("d;e;f", 3.0 / 7)])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 2)
self.assertEqual(red, 7)
self.assertEqual(other_frac, 3.0 / 10)
# test the area charts
exp_all_counts = ['4', '3', '2', '1']
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 5, 10, 'area')
self.assertEqual(
fracs_labels_other, [("a;b;c", 1.0 / 10), ("a;g;h", 4.0 / 10),
("d;e;f", 3.0 / 10), ("d;e;i", 2.0 / 10)])
self.assertEqual(fracs_labels, [])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 0)
self.assertEqual(red, 10)
self.assertEqual(other_frac, 0)
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 3, 10, 'area')
self.assertEqual(
fracs_labels_other, [("a;g;h", 4.0 / 10), ("d;e;f", 3.0 / 10),
('d;e;i', 2.0 / 10)])
self.assertEqual(
fracs_labels, [("a;g;h", 8.0 / 18), ("d;e;f", 6.0 / 18)])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 2)
self.assertEqual(red, 9)
self.assertEqual(other_frac, 3.0 / 10)
# test bar charts
exp_all_counts = ['4', '3', '2', '1']
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 5, 10, 'bar')
self.assertEqual(
fracs_labels_other, [("a;b;c", 1.0 / 10), ("a;g;h", 4.0 / 10),
("d;e;f", 3.0 / 10), ("d;e;i", 2.0 / 10)])
self.assertEqual(fracs_labels, [])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 0)
self.assertEqual(red, 10)
self.assertEqual(other_frac, 0)
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac \
= get_fracs(self.counts1, 3, 10, 'bar')
self.assertEqual(
fracs_labels_other, [("a;g;h", 4.0 / 10), ("d;e;f", 3.0 / 10),
('d;e;i', 2.0 / 10)])
self.assertEqual(
fracs_labels, [("a;g;h", 8.0 / 18), ("d;e;f", 6.0 / 18)])
self.assertEqual(all_counts, exp_all_counts)
self.assertEqual(other_cat, 2)
self.assertEqual(red, 9)
self.assertEqual(other_frac, 3.0 / 10)
self._paths_to_clean_up = ["/tmp/qiimewebfiles/charts/" + f
for f in listdir("/tmp/qiimewebfiles/charts")]
def test_make_HTML_table(self):
"""make_HTML_table should Make HTML tables for one set charts"""
# test pie charts
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \
get_fracs(self.counts1, 5, 10, 'pie')
img_data = make_HTML_table("Phylum", other_frac, 10, red, other_cat,
fracs_labels_other, fracs_labels,
self.dir_path, all_counts, 1, self.prefs,
self.color_prefs, 'black', 'white', 'pie',
'Test1',
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi, 0,
'categorical', False)
self.assertEqual(len(img_data), 2)
# test area charts
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \
get_fracs(self.counts1, 5, 10, 'area')
img_data = make_HTML_table("Phylum", other_frac, 10, red, other_cat,
fracs_labels_other, fracs_labels,
self.dir_path, all_counts, 1, self.prefs,
self.color_prefs, 'black', 'white', 'pie',
'Test1',
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi, 0,
'categorical', False)
self.assertEqual(len(img_data), 2)
# test bar charts
fracs_labels_other, fracs_labels, all_counts, other_cat, red, other_frac = \
get_fracs(self.counts1, 5, 10, 'bar')
img_data = make_HTML_table("Phylum", other_frac, 10, red, other_cat,
fracs_labels_other, fracs_labels,
self.dir_path, all_counts, 1, self.prefs,
self.color_prefs, 'black', 'white', 'pie',
'Test1',
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi, 0,
'categorical', False)
self.assertEqual(len(img_data), 2)
self._paths_to_clean_up = ["/tmp/qiimewebfiles/charts/" + f
for f in listdir("/tmp/qiimewebfiles/charts")]
def test_make_pie_chart(self):
"""make_pie_chart should create HTML source and pdfs for pie_charts"""
filename1 = '/tmp/qiimewebfiles/charts/pie_chart.png'
filename2 = '/tmp/qiimewebfiles/charts/pie_chart_legend.pdf'
filename3 = '/tmp/qiimewebfiles/charts/pie_chart.pdf'
obs1, obs2, obs3, obs4 = make_pie_chart(self.fracs, self.dir_path, 1,
self.prefs, self.color_prefs, "black", "white",
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi, False,
file_prefix="pie_chart",
props=self.props)
self.assertTrue(exists(filename1), 'The png file was not created in \
the appropriate location')
        self.assertTrue(exists(filename2), 'The legend pdf file was not created in \
the appropriate location')
self.assertTrue(exists(filename3), 'The pdf file was not created in \
the appropriate location')
self._paths_to_clean_up = ["/tmp/qiimewebfiles/charts/" + f
for f in listdir("/tmp/qiimewebfiles/charts")]
def test_make_area_bar_chart(self):
"""make_area_bar_chart should create HTML source and pdfs for area
and bar charts"""
# following is a list of files being generated
filename1 = '/tmp/qiimewebfiles/charts/area_chart.png'
filename2 = '/tmp/qiimewebfiles/charts/area_chart_legend.pdf'
filename3 = '/tmp/qiimewebfiles/charts/area_chart.pdf'
filename4 = '/tmp/qiimewebfiles/charts/bar_chart.png'
filename5 = '/tmp/qiimewebfiles/charts/bar_chart_legend.pdf'
filename6 = '/tmp/qiimewebfiles/charts/bar_chart.pdf'
# test area chart
obs1, obs2, obs3, obs4 = make_area_bar_chart(self.sample_ids,
self.area_fracs,
self.taxa, self.dir_path, 1, self.prefs,
self.color_prefs, "black", "white", "area",
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width,
self.dpi, 0, 'categorical', False,
"area_chart")
self.assertTrue(exists(filename1), 'The png file was not created in \
the appropriate location')
        self.assertTrue(exists(filename2), 'The legend pdf file was not created in \
the appropriate location')
self.assertTrue(exists(filename3), 'The pdf file was not created in \
the appropriate location')
# test bar chart
obs1, obs2, obs3, obs4 = make_area_bar_chart(self.sample_ids,
self.area_fracs,
self.taxa, self.dir_path, 1, self.prefs,
self.color_prefs, "black", "white", "bar",
self.generate_image_type, self.plot_width,
self.plot_height, self.bar_width, self.dpi,
0, 'categorical', False, "bar_chart",
self.props)
self.assertTrue(exists(filename4), 'The png file was not created in \
the appropriate location')
        self.assertTrue(exists(filename5), 'The legend pdf file was not created in \
the appropriate location')
self.assertTrue(exists(filename6), 'The pdf file was not created in \
the appropriate location')
self._paths_to_clean_up = ["/tmp/qiimewebfiles/charts/" + f
for f in listdir("/tmp/qiimewebfiles/charts")]
def test_write_html_file(self):
"Write html and make sure it gets cleaned up"""
filename1 = '/tmp/test.html'
self._paths_to_clean_up = [filename1]
write_html_file('Test', '/tmp/test.html')
self.assertTrue(exists(filename1), 'The file was not created in \
the appropriate location')
self._paths_to_clean_up = [filename1]
# run tests if called from command line
if __name__ == "__main__":
main()
|
gpl-2.0
|
juliangarcia/repeated_games_fsa
|
python/fsa.py
|
1
|
1838
|
from collections import namedtuple
import scipy.misc
import os
import pydot
import re
import matplotlib.pyplot as plt
from IPython.core.display import Image
#defines a state as a named tuple
State = namedtuple('State', 'action DD CD DC CC')
def find_between(s, pre, post=''):
"""
Finds a substring in s, between pre and post
"""
result = re.search(pre+'(.*)'+post, s)
return result.group(1)
def parse_string_to_automata(string):
"""
    Given a string representation of an automaton, returns a list of states
"""
list_of_states = []
no_rubish = find_between(string, '\[', '\]')
for i in no_rubish.split(','):
state_strings = find_between(i, '/').split(' ')
list_of_states.append(State(i.strip()[0], int(state_strings[0]), int(state_strings[1]), int(state_strings[2]), int(state_strings[3])))
return list_of_states
# def draw_automata(list_of_states, title='', size=5):
# graph = pydot.Dot(graph_type='digraph')
# list_of_nodes = []
# #add nodes
# for i, state in enumerate(list_of_states):
# if state.action == 'C':
# color = 'blue'
# if state.action == 'D':
# color = 'red'
# list_of_nodes.append(pydot.Node(str(i), style="filled", fillcolor=color))
# for i in list_of_nodes:
# graph.add_node(i)
# #add transitions
# for i, state in enumerate(list_of_states):
# graph.add_edge(pydot.Edge(i, state.DD, fontsize="10.0", label='DD'))
# graph.add_edge(pydot.Edge(i, state.DC, fontsize="10.0", label='DC'))
# graph.add_edge(pydot.Edge(i, state.CD, fontsize="10.0", label='CD'))
# graph.add_edge(pydot.Edge(i, state.CC, fontsize="10.0", label='CC'))
# graph.write_png('automata.png')
# return graph
# plt.figure(figsize=(size, size))
# plt.xticks([],[])
# plt.yticks([],[])
# plt.imshow(scipy.misc.imread('automata.png'))
# plt.title(title)
# os.remove('automata.png')
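# --- Illustrative usage (hedged sketch, not part of the original script) ----
# The serialisation format is inferred from ``parse_string_to_automata``:
# states are listed between square brackets, each written as
# ``<action>/<DD> <CD> <DC> <CC>``.  The example string below is made up.
if __name__ == "__main__":
    example = "[C/0 0 0 0, D/1 1 1 1]"
    for state in parse_string_to_automata(example):
        print(state)
    # Expected output:
    # State(action='C', DD=0, CD=0, DC=0, CC=0)
    # State(action='D', DD=1, CD=1, DC=1, CC=1)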
|
mit
|
mne-tools/mne-tools.github.io
|
0.19/_downloads/e97b5c56259ba5eae9df0db2ed8933c0/plot_brainstorm_phantom_ctf.py
|
10
|
4721
|
# -*- coding: utf-8 -*-
"""
.. _plot_brainstorm_phantom_ctf:
=======================================
Brainstorm CTF phantom dataset tutorial
=======================================
Here we compute the evoked from raw for the Brainstorm CTF phantom
tutorial dataset. For comparison, see [1]_ and:
https://neuroimage.usc.edu/brainstorm/Tutorials/PhantomCtf
References
----------
.. [1] Tadel F, Baillet S, Mosher JC, Pantazis D, Leahy RM.
Brainstorm: A User-Friendly Application for MEG/EEG Analysis.
Computational Intelligence and Neuroscience, vol. 2011, Article ID
879716, 13 pages, 2011. doi:10.1155/2011/879716
"""
# Authors: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import fit_dipole
from mne.datasets.brainstorm import bst_phantom_ctf
from mne.io import read_raw_ctf
print(__doc__)
###############################################################################
# The data were collected with a CTF system at 2400 Hz.
data_path = bst_phantom_ctf.data_path(verbose=True)
# Switch to these to use the higher-SNR data:
# raw_path = op.join(data_path, 'phantom_200uA_20150709_01.ds')
# dip_freq = 7.
raw_path = op.join(data_path, 'phantom_20uA_20150603_03.ds')
dip_freq = 23.
erm_path = op.join(data_path, 'emptyroom_20150709_01.ds')
raw = read_raw_ctf(raw_path, preload=True)
###############################################################################
# The sinusoidal signal is generated on channel HDAC006, so we can use
# that to obtain precise timing.
sinusoid, times = raw[raw.ch_names.index('HDAC006-4408')]
plt.figure()
plt.plot(times[times < 1.], sinusoid.T[times < 1.])
###############################################################################
# Let's create some events using this signal by thresholding the sinusoid.
events = np.where(np.diff(sinusoid > 0.5) > 0)[1] + raw.first_samp
events = np.vstack((events, np.zeros_like(events), np.ones_like(events))).T
###############################################################################
# The CTF software compensation works reasonably well:
raw.plot()
###############################################################################
# But here we can get slightly better noise suppression, lower localization
# bias, and a better dipole goodness of fit with spatio-temporal (tSSS)
# Maxwell filtering:
raw.apply_gradient_compensation(0) # must un-do software compensation first
mf_kwargs = dict(origin=(0., 0., 0.), st_duration=10.)
raw = mne.preprocessing.maxwell_filter(raw, **mf_kwargs)
raw.plot()
###############################################################################
# Our choice of tmin and tmax should capture exactly one cycle, so
# we can make the unusual choice of baselining using the entire epoch
# when creating our evoked data. We also then crop to a single time point
# (@t=0) because this is a peak in our signal.
tmin = -0.5 / dip_freq
tmax = -tmin
epochs = mne.Epochs(raw, events, event_id=1, tmin=tmin, tmax=tmax,
baseline=(None, None))
evoked = epochs.average()
evoked.plot(time_unit='s')
evoked.crop(0., 0.)
###############################################################################
# .. _plt_brainstorm_phantom_ctf_eeg_sphere_geometry:
#
# Let's use a :ref:`sphere head geometry model <eeg_sphere_model>`
# and let's see the coordinate alignment and the sphere location.
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)
mne.viz.plot_alignment(raw.info, subject='sample',
meg='helmet', bem=sphere, dig=True,
surfaces=['brain'])
del raw, epochs
###############################################################################
# To do a dipole fit, let's use the covariance provided by the empty room
# recording.
raw_erm = read_raw_ctf(erm_path).apply_gradient_compensation(0)
raw_erm = mne.preprocessing.maxwell_filter(raw_erm, coord_frame='meg',
**mf_kwargs)
cov = mne.compute_raw_covariance(raw_erm)
del raw_erm
dip, residual = fit_dipole(evoked, cov, sphere, verbose=True)
###############################################################################
# Compare the actual position with the estimated one.
expected_pos = np.array([18., 0., 49.])
diff = np.sqrt(np.sum((dip.pos[0] * 1000 - expected_pos) ** 2))
print('Actual pos: %s mm' % np.array_str(expected_pos, precision=1))
print('Estimated pos: %s mm' % np.array_str(dip.pos[0] * 1000, precision=1))
print('Difference: %0.1f mm' % diff)
print('Amplitude: %0.1f nAm' % (1e9 * dip.amplitude[0]))
print('GOF: %0.1f %%' % dip.gof[0])
|
bsd-3-clause
|
AleksanderLidtke/XKCD
|
JapanSize.py
|
1
|
7133
|
# -*- coding: utf-8 -*-
"""
Throughout my travels I've discovered that most people, including myself, do not
realise many things about our Planet's size. For example, the latitude and
longitude of certain regions (South America is much further east than the US)
or the relative size of countries (Japan is surprisingly long).
Thus, I've created this script to understand such things a bit better. It
compares the sizes of Japan and Europe, which is the most recent surprise
I came across.
The shape data were acquired from [Global Administrative Areas](http://www.gadm.org/country)
website. Thus, their **redistribution or commercial use is not allowed without
prior permission**.
Created on Sun May 7 14:13:47 2017
@author: Alek
"""
from mpl_toolkits.basemap import Basemap
import numpy, shapefile, os, matplotlib.pyplot
matplotlib.pyplot.xkcd() # Here we go.
def plotPrefecture(*,shp,colour,bMap,axes,latOff=0,longOff=0,lwdth=0.5):
""" Plot a prefecture from a shapefile.
Kwargs
-------
* shp - shape as returned by :func:`shapefile.Reader.shapes`,
    * colour - colour accepted by :func:`matplotlib.pyplot.Axes.plot`,
* bMap - instance of :class:`mpl_toolkits.basemap.Basemap` used to project
the shape onto a map,
* axes - :class:`matplotlib.pyplot.Axes` instance where to plot,
    * latOff,longOff - deg, by how much to offset the `shp` latitudes and
      longitudes before plotting,
    * lwdth - line width as accepted by :func:`matplotlib.pyplot.Axes.plot`.
"""
if len(shp.parts)==1: # Only one region in this shape.
vertices=numpy.array(shp.points)
bMap.plot(vertices[:,0]+longOff,vertices[:,1]+latOff,color=colour,
lw=lwdth,ls='-',latlon=True,ax=axes)
else: # This shape has islands, disjoint regions and what-not.
for ip in range(len(shp.parts)): # For every part of the shape.
# Indices that get the slice with this part of the shape.
lower=shp.parts[ip]
if ip==len(shp.parts)-1:
upper=len(shp.points) # Last part.
else:
upper=shp.parts[ip+1] # Next part starts at idx parts[ip+1]
partVertices=numpy.array(shp.points[lower:upper])
bMap.plot(partVertices[:,0]+longOff,partVertices[:,1]+latOff,
color=colour,lw=lwdth,ls='-',latlon=True,ax=axes)
# Various font sizes.
ticksFontSize=18
labelsFontSizeSmall=20
labelsFontSize=30
titleFontSize=34
legendFontSize=20
matplotlib.rc('xtick',labelsize=ticksFontSize)
matplotlib.rc('ytick',labelsize=ticksFontSize)
cm=matplotlib.pyplot.cm.get_cmap('viridis')
# Read a shapefile with Japan's cartography data.
shapeRdr0=shapefile.Reader(os.path.join('borders','JPN_adm0')) # Country.
shapeRdr1=shapefile.Reader(os.path.join('borders','JPN_adm1')) # Prefectures.
shapeRdr2=shapefile.Reader(os.path.join('borders','JPN_adm2')) # Towns.
shape=shapeRdr0.shapes()[0]
if shape.shapeType != shapefile.POLYGON:
raise ValueError('Shape not polygon with shapeType={}'.format(shape.shapeType ))
vertices=numpy.array(shape.points) # 2D array of coordinates.
# Where to centre different maps and where to translate Japan to.
latJpn=37 # Where to centre one map, i.e. over Japan. Lat/lon in degrees.
lonJpn=138
latCtr=40 # Where to centre the Europe's map. Lat/lon in degrees.
lonCtr=10
dLonJ=10 # Plot Japan at these coordinates over the map of Europe.
dLatJ=50
' Mercator projection, a.k.a. "the things you learn in schools".'
fig,ax=matplotlib.pyplot.subplots(1,2,figsize=(16,8))
# The whole Planet.
mercMapP=Basemap(projection='merc',llcrnrlat=-80,urcrnrlat=80,llcrnrlon=-180,
urcrnrlon=180,lat_ts=10,ax=ax[0],resolution='c')
mercMapP.drawcoastlines(linewidth=0.5)
mercMapP.drawcountries(linewidth=0.25)
mercMapP.drawparallels(numpy.arange(-90.,91.,30.))
mercMapP.drawmeridians(numpy.arange(-180.,181.,60.))
ax[0].set_title(r'$Our\ Planet$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=1,bMap=mercMapP,axes=ax[0])
# Only Europe.
mercMapE=Basemap(projection='merc',llcrnrlat=30,urcrnrlat=75,llcrnrlon=-25,
urcrnrlon=40,lat_ts=10,ax=ax[1],resolution='l')
mercMapE.drawcoastlines(linewidth=0.5)
mercMapE.drawcountries(linewidth=0.25)
mercMapE.drawparallels(numpy.arange(mercMapE.latmin,mercMapE.latmax,10.))
mercMapE.drawmeridians(numpy.arange(mercMapE.lonmin,mercMapE.lonmax,15.))
ax[1].set_title(r'$Europe$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=mercMapE,axes=ax[1],
latOff=dLatJ-latJpn,longOff=dLonJ-lonJpn)
fig.show()
' One figure with orthonormal maps centred on Japan and Europe.'
fig,ax=matplotlib.pyplot.subplots(1,2,figsize=(16,8))
# Centred on Japan.
ortnMapJ=Basemap(projection='ortho',lat_0=latJpn,lon_0=lonJpn,resolution='c',
ax=ax[0])
ortnMapJ.drawcoastlines(linewidth=0.5)
ortnMapJ.drawcountries(linewidth=0.25)
ortnMapJ.drawmeridians(numpy.arange(0,360,30))
ortnMapJ.drawparallels(numpy.arange(-90,90,30))
ax[0].set_title(r'${}$'.format(shapeRdr0.records()[0][4]),fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=ortnMapJ,axes=ax[0])
# Plot all the prefectures.
cNorm=matplotlib.colors.Normalize(vmin=0,vmax=shapeRdr1.numRecords)
scalarMap=matplotlib.cm.ScalarMappable(norm=cNorm,cmap=cm)
prefectures=shapeRdr1.shapes()
prefRecords=shapeRdr1.records()
for i in range(shapeRdr1.numRecords):
if prefRecords[i][9]=='Prefecture':
plotPrefecture(shp=prefectures[i],colour=scalarMap.to_rgba(i),
lwdth=0.5,bMap=ortnMapJ,axes=ax[0])
# Centred on Europe.
ortnMapE=Basemap(projection='ortho',lat_0=latCtr,lon_0=lonCtr,resolution='c',
ax=ax[1])
ortnMapE.drawcoastlines(linewidth=0.5)
ortnMapE.drawcountries(linewidth=0.25)
ortnMapE.drawmeridians(numpy.arange(0,360,30))
ortnMapE.drawparallels(numpy.arange(-90,90,30))
ax[1].set_title(r'${}\ over\ Europe$'.format(shapeRdr0.records()[0][4]),
fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=ortnMapE,axes=ax[1],
latOff=dLatJ-latJpn,longOff=dLonJ-lonJpn)
fig.show()
' Japan and Kitakyushu overlaid on Europe.'
fig,ax=matplotlib.pyplot.subplots(1,1,figsize=(16,8))
mercMapE=Basemap(projection='merc',llcrnrlat=30,urcrnrlat=75,llcrnrlon=-25,
urcrnrlon=40,lat_ts=10,ax=ax,resolution='l')
mercMapE.drawcoastlines(linewidth=0.5)
mercMapE.drawcountries(linewidth=0.25)
mercMapE.drawparallels(numpy.arange(mercMapE.latmin,mercMapE.latmax,10.))
mercMapE.drawmeridians(numpy.arange(mercMapE.lonmin,mercMapE.lonmax,15.))
ax.set_title(r'$Europe,\ true\ lat.$',fontsize=titleFontSize)
plotPrefecture(shp=shape,colour='gold',lwdth=2,bMap=mercMapE,axes=ax,
latOff=0,longOff=dLonJ-lonJpn)
# Show annotation at the true latitude.
xKIT,yKIT=mercMapE.projtran(130.834730+dLonJ-lonJpn,33.8924837)
xTXT,yTXT=mercMapE.projtran(110.834730+dLonJ-lonJpn,45.8924837)
ax.scatter([xKIT],[yKIT],s=50,c='crimson')
ax.annotate('Here', xy=(xKIT,yKIT),xytext=(xTXT,yTXT),color='crimson',
arrowprops=dict(facecolor='crimson', shrink=0.05))
fig.show()
|
mit
|
idlead/scikit-learn
|
sklearn/feature_selection/tests/test_rfe.py
|
13
|
10007
|
"""
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
    # All the noisy variables were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
|
bsd-3-clause
|
MohammedWasim/scikit-learn
|
sklearn/feature_selection/tests/test_chi2.py
|
221
|
2398
|
"""
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
    chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
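# --- Illustrative sketch (not part of the original test module) -------------
# A hedged demo of what ``chi2`` itself returns on the module-level toy data
# above: one statistic and one p-value per feature.  Feature 1 is constant,
# so its observed class counts match the expected ones and its statistic is 0.
if __name__ == "__main__":
    scores, pvalues = chi2(X, y)
    print("chi2 statistics:", scores)
    print("p-values:", pvalues)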
|
bsd-3-clause
|
jm-begon/scikit-learn
|
sklearn/manifold/locally_linear.py
|
206
|
25061
|
"""Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
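# --- Illustrative sketch (not part of the original module) ------------------
# Hedged demo of ``barycenter_kneighbors_graph``: since the barycenter weights
# of each sample over its neighbors sum to one, every row of the returned
# sparse matrix sums to (approximately) one.  Run as a module, e.g.
# ``python -m sklearn.manifold.locally_linear``, so the relative imports work.
if __name__ == "__main__":
    rng = check_random_state(0)
    X_demo = rng.rand(20, 3)
    W_demo = barycenter_kneighbors_graph(X_demo, n_neighbors=5)
    print(np.asarray(W_demo.sum(axis=1)).ravel())  # ~1.0 for every sample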
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
                   n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add the identity to the diagonal: M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
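# Usage sketch (illustrative comment, not part of the original module). Given a
# hypothetical (n_samples, n_features) array X, the function returns both the
# embedding and its reconstruction error:
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.rand(100, 5)
#   Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
#   # Y.shape == (100, 2); err corresponds to norm(Y - W Y, 'fro')**2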
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
``n_neighbors > n_components * (1 + (n_components + 1) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
    random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
        X_new : array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
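# Estimator usage sketch (illustrative comment only; X and X_new are assumed
# numeric arrays of shape (n_samples, n_features)). Unlike the plain function,
# the class also supports transform() for out-of-sample points:
#
#   lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
#   X_embedded = lle.fit_transform(X)       # shape (n_samples, 2)
#   X_new_embedded = lle.transform(X_new)   # barycenter projection of new points
#   # lle.reconstruction_error_ holds the associated reconstruction error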
|
bsd-3-clause
|
AugurProject/pyconsensus
|
pyconsensus/plotj.py
|
1
|
3619
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
from pyconsensus import Oracle
import rpy2.robjects as robj
import rpy2.robjects.pandas2ri
from rpy2.robjects.packages import importr
pd.set_option("display.max_rows", 25)
pd.set_option("display.width", 1000)
np.set_printoptions(linewidth=500)
# > M
# QID1 QID2 QID3 QID4 QID5 QID6 QID7 QID8 QID9 QID10
# Voter 1 1 0.0 1.0 1 0.4498141 0 0 1 1 0.7488008
# Voter 2 0 0.5 0.5 NA 0.4460967 0 0 1 0 0.7488008
# Voter 3 1 0.0 1.0 1 0.4498141 0 0 1 1 NA
# > Scales
# QID1 QID2 QID3 QID4 QID5 QID6 QID7 QID8 QID9 QID10
# Scaled 0 0 0 0 1 0 0 0 0 1
# Min 0 0 0 0 0 0 0 0 0 0
# Max 1 1 1 1 1 1 1 1 1 1
# > s <- BinaryScales(M)
# > s
# QID1 QID2 QID3 QID4 QID5 QID6 QID7 QID8 QID9 QID10
# Scaled 0 0 0 0 0 0 0 0 0 0
# Min 0 0 0 0 0 0 0 0 0 0
# Max 1 1 1 1 1 1 1 1 1 1
# reports = [[1, 0.0, 1.0, 1, 0.4498141, 0, 0, 1, 1, 0.7488008],
# [0, 0.5, 0.5, np.nan, 0.4460967, 0, 0, 1, 0, 0.7488008],
# [1, 0.0, 1.0, 1, 0.4498141, 0, 0, 1, 1, np.nan]]
reports = [[ 1, 0.5, 0, 0 ],
[ 1, 0.5, 0, 0 ],
[ 1, 1, 0, 0 ],
[ 1, 0.5, 0, 0 ],
[ 1, 0.5, 0, 0 ],
[ 1, 0.5, 0, 0 ],
[ 1, 0.5, 0, 0 ]]
num_rows = len(reports)
num_cols = len(reports[0])
Results = Oracle(votes=reports).consensus()
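# Note (added comment): Results is the consensus output dict; the fields used
# below are Results["filled"] (the filled report matrix) and
# Results["agents"]["voter_bonus"] / Results["agents"]["old_rep"], whose
# difference gives each reporter's reputation gain or loss.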
index = []
for i in range(1,num_rows+1):
index.append("Reporter " + str(i))
columns = []
for j in range(1,num_cols+1):
columns.append("E" + str(j))
df = pd.DataFrame(Results["filled"], columns=columns, index=index)
df["Var1"] = df.index
print df
mResults = pd.melt(df, id_vars=["Var1"], var_name="Var2")
mResults.value = pd.Categorical(np.round(mResults.value, 4))
mResults.Var1 = pd.Categorical(mResults.Var1)
# Get scores (opacity)
gain_loss = np.matrix(Results["agents"]["voter_bonus"]) - np.matrix(Results["agents"]["old_rep"])
SC = pd.DataFrame(np.hstack((np.matrix(index).T, gain_loss.T)), columns=["Var1", "GainLoss"])
# Format data
DF = pd.merge(mResults, SC)
DF.columns = ("Reporter", "Event", "Outcome", "Scores")
# Build the plot
plotFunc = robj.r("""
library(ggplot2)
function (DF) {
p1 <- ggplot(DF,aes(x=as.numeric(Outcome), y=1, fill=Reporter, alpha=as.numeric(Scores))) +
geom_bar(stat="identity", colour="black") +
geom_text(aes(label = Reporter, vjust = 1, ymax = 1), position = "stack", alpha=I(1)) +
facet_grid(Event ~ .)
p1f <- p1 +
theme_bw() +
scale_fill_hue(h=c(0,130)) +
scale_alpha_continuous(guide=guide_legend(title = "Scores"), range=c(.05,.9)) +
xlab("Outcome") +
ylab("Unscaled Votes") +
labs(title="Plot of Judgment Space")
# Uncomment this line to save to pdf file
# pdf("plot.pdf", width=8.5, height=11)
print(p1f)
}
""")
gr = importr('grDevices')
robj.pandas2ri.activate()
testData_R = robj.conversion.py2ri(DF)
plotFunc(testData_R)
raw_input()
gr.dev_off()
# (doesn't work, just use rpy2...)
# p1 = ggplot(DF, aes(x="Outcome", y=1, fill="Reporter", alpha="Scores")) + \
# geom_bar(stat="identity", colour="black") + \
# geom_text(aes(label="Reporter", vjust=1), position="stack", alpha=1) + \
# facet_grid("Event", None, scales="fixed")
|
gpl-3.0
|
ssh0/growing-string
|
triangular_lattice/vicsek/vicsek.py
|
1
|
6304
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-05-15
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from triangular import LatticeTriangular as LT
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import matplotlib.animation as animation
import numpy as np
from numpy import linalg as la
import random
import time
rint = random.randint
randm = random.random
class Point:
def __init__(self, id, ix, iy):
self.id, self.x, self.y = id, ix, iy
        # the speed is uniform; the integer vel (0-5) encodes the direction
        # of the velocity on the triangular lattice
self.vel = rint(0, 5)
self.priority = randm()
class Main:
def __init__(self, Lx=20, Ly=20, rho=0.9, lattice_scale=10, T=0.4, plot=True,
frames=100):
self.lattice = LT(- np.ones((Lx, Ly), dtype=np.int),
scale=lattice_scale)
self.N = int(Lx * Ly * rho)
self.points = [Point(n, rint(0, Lx - 1), rint(0, Ly - 1)) for n
in range(self.N)]
self.T = T
self.plot = plot
self.beta = 1. / self.T
self.order_param = []
self.num = 0
angs = [i * np.pi / 3. for i in range(6)]
self.velx = [np.cos(ang) for ang in angs]
self.vely = [-np.sin(ang) for ang in angs]
self.u = [np.array([vx, -vy]) for vx, vy in zip(self.velx, self.vely)]
self.lattice_X = self.lattice.coordinates_x
self.lattice_Y = self.lattice.coordinates_y
self.lattice_X = np.array(self.lattice_X).reshape(Lx, Ly)
self.lattice_Y = np.array(self.lattice_Y).reshape(Lx, Ly)
X_min, X_max = np.min(self.lattice_X), np.max(self.lattice_X)
Y_min, Y_max = np.min(self.lattice_Y), np.max(self.lattice_Y)
if self.plot:
self.fig, (self.ax1, self.ax2) = plt.subplots(
1, 2, figsize=(8, 10))
self.ax1.set_xlim([X_min, X_max])
self.ax1.set_ylim([Y_min, Y_max])
self.ax1.set_xticklabels([])
self.ax1.set_yticklabels([])
self.ax1.set_aspect('equal')
self.ax1.set_title("Lattice-Gas model for collective motion")
self.triang = tri.Triangulation(self.lattice_X.flatten(),
self.lattice_Y.flatten())
self.ax1.triplot(self.triang, color='whitesmoke', lw=0.5)
self.l, = self.ax2.plot([], [], 'b-')
self.ax2.set_title(r"Order parameter $m=\frac{1}{N} |\sum \vec{u}_{i}|$ ($T = %.2f$)"
% self.T)
self.ax2.set_ylim([0, 1])
def init_func(*arg):
return self.l,
ani = animation.FuncAnimation(self.fig, self.update, frames=frames,
init_func=init_func,
interval=1, blit=True, repeat=False)
plt.show()
else:
for i in range(100):
self.update(i)
print self.order_param[-1]
def update(self, num):
lowest, upper = {}, []
        # detect points occupying the same lattice site
for point in self.points:
if not lowest.has_key((point.x, point.y)):
lowest[(point.x, point.y)] = point
elif lowest[(point.x, point.y)].priority > point.priority:
upper.append(lowest[(point.x, point.y)])
lowest[(point.x, point.y)] = point
else:
upper.append(point)
        # only the point with the smallest priority value feels the nearest-neighbor effect (decided by Boltzmann eq)
for point in lowest.values():
            # sum up the velocities of the nearest neighbors
velocities = np.array([0., 0.])
nnx, nny = self.lattice.neighbor_of(point.x, point.y)
for x, y in zip(nnx, nny):
if lowest.has_key((x, y)):
ang = lowest[(x, y)].vel
velocities += np.array([self.velx[ang], -self.vely[ang]])
            # choose the new direction stochastically according to the Boltzmann distribution
A = [np.exp(self.beta * np.dot(u, velocities)) for u in self.u]
rand = randm() * sum(A)
p = 0
for i, P in enumerate(A):
p += P
if rand < p:
point.vel = i
break
        # all the other points change their direction randomly
for point in upper:
# change the velocity of the point
point.vel = rint(0, 5)
        # update the coordinates and velocity vector of each point, then draw
self.update_quivers()
        # plot the order parameter
self.plot_order_param(num)
return self.quiver, self.l
def update_quivers(self):
# Get information to plot
X, Y = [], []
for point in self.points:
# Get possible direction
newx, newy = self.lattice.neighbor_of(point.x, point.y)
# Choose one by its velocity
point.x, point.y = newx[point.vel], newy[point.vel]
X.append(self.lattice_X[point.x, point.y])
Y.append(self.lattice_Y[point.x, point.y])
vel_x = [self.velx[p.vel] for p in self.points]
vel_y = [self.vely[p.vel] for p in self.points]
if self.plot:
self.quiver = self.ax1.quiver(X, Y, vel_x, vel_y,
units='xy', angles='xy', color='k')
def plot_order_param(self, num):
# nwidth = 20
self.order_param.append(self.cal_order_param())
self.num += 1
if self.plot:
nl = max(self.num - 20, 0)
nr = 1.25 * 20 + nl
self.ax2.set_xlim([nl, nr])
self.l.set_data(np.arange(nl, self.num), self.order_param[nl:])
def cal_order_param(self):
# return order parameter
velx = sum([self.velx[p.vel] for p in self.points])
vely = sum([self.vely[p.vel] for p in self.points])
return la.norm([velx, vely]) / self.N
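    # Sanity-check sketch (added comment, not part of the original script):
    # if every point shares the same direction, e.g. p.vel == 0 for all points,
    # then sum(velx) == N and sum(vely) == 0, so m = la.norm([N, 0]) / N == 1.0;
    # for isotropically distributed directions m tends towards 0.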
if __name__ == '__main__':
main = Main(Lx=40, Ly=40, rho=0.9, T=0.41, frames=300, plot=True)
# main = Main(Lx=40, Ly=40, T=0.6, frames=1000, plot=True)
|
mit
|
yuyuz/FLASH
|
HPOlib/Plotting/plotTraceWithStd_perTime.py
|
1
|
10708
|
#!/usr/bin/env python
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import cPickle
import itertools
import sys
from matplotlib.pyplot import tight_layout, figure, subplots_adjust, subplot, savefig, show
import matplotlib.gridspec
import numpy as np
from HPOlib.Plotting import plot_util
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
def plot_optimization_trace(trial_list, name_list, times_list, optimum=0, title="",
log=True, save="", y_max=0, y_min=0, scale_std=1, xmax=10, every=10):
markers = plot_util.get_plot_markers()
colors = plot_util.get_plot_colors()
linestyles = itertools.cycle(['-'])
size = 1
ratio = 5
gs = matplotlib.gridspec.GridSpec(ratio, 1)
fig = figure(1, dpi=100)
fig.suptitle(title, fontsize=16)
ax1 = subplot(gs[0:ratio, :])
ax1.grid(True, linestyle=':', which='major', color='grey', alpha=0.4, zorder=0)
min_val = sys.maxint
max_val = -sys.maxint
max_trials = 0
trial_list_means = list()
trial_list_std = list()
# One trialList represents all runs from one optimizer
for i in range(len(trial_list)):
if log:
trial_list_means.append(np.log10(np.mean(np.array(trial_list[i]), axis=0)))
else:
trial_list_means.append(np.median(np.array(trial_list[i]), axis=0))
trial_list_std.append(np.std(np.array(trial_list[i]), axis=0)*scale_std)
times_list[i] = np.array(times_list[i])
fig.suptitle(title, fontsize=16)
# Plot the median error and std
for i in range(len(trial_list_means)):
new_time_list = [t * 1.0 / 3600 for t in times_list]
x = new_time_list[i]
y = trial_list_means[i] - optimum
m = markers.next()
c = colors.next()
l = linestyles.next()
std_up = y + trial_list_std[i]
std_down = y - trial_list_std[i]
ax1.fill_between(x, std_down, std_up,
facecolor=c, alpha=0.3, edgecolor=c)
# ax1.plot(x, y, color=c, linewidth=size*2,
# label=name_list[i][0] + "(" + str(len(trial_list[i])) + ")",
# linestyle=l, marker="")
ax1.plot(x, y, color=c, linewidth=size*2,
label=name_list[i][0],
linestyle=l, marker=m, markevery=every)
if min(std_down) < min_val:
min_val = min(std_down)
if max(y + std_up) > max_val:
max_val = max(std_up)
if max(new_time_list[i]) > max_trials:
max_trials = max(new_time_list[i])
# Maybe plot on logscale
if scale_std != 1:
ylabel = ", %s * std" % scale_std
else:
ylabel = ""
ylabel = ''
if log:
ax1.set_ylabel("log10(Minfunction value)" + ylabel)
else:
ax1.set_ylabel("Test error rate (best so far)" + ylabel)
# Descript and label the stuff
leg = ax1.legend(loc='best', fancybox=True)
leg.get_frame().set_alpha(0.5)
ax1.set_xlabel("Time (hours)")
if y_max == y_min:
# Set axes limit
ax1.set_ylim([min_val-0.1*abs((max_val-min_val)), max_val+0.1*abs((max_val-min_val))])
else:
ax1.set_ylim([y_min, y_max])
# ax1.set_xlim([0, max_trials])
ax1.set_xlim([0, xmax])
tight_layout()
subplots_adjust(top=0.85)
if save != "":
savefig(save, dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches="tight", pad_inches=0.1)
else:
show()
def fill_trajectories(trace_list, times_list):
""" Each trajectory need to has the exact same number of entries and timestamps"""
# We need to define the max value = what is measured before the first evaluation
max_value = np.max([np.max(ls) for ls in trace_list])
number_exp = len(trace_list)
new_trajectories = list()
new_times = list()
for i in range(number_exp):
new_trajectories.append(list())
new_times.append(list())
# noinspection PyUnusedLocal
counter = [1 for i in range(number_exp)]
finish = False
# We need to insert the max values in the beginning and the min values in the end
for i in range(number_exp):
trace_list[i].insert(0, max_value)
trace_list[i].append(np.min(trace_list[i]))
times_list[i].insert(0, 0)
times_list[i].append(sys.maxint)
# Add all possible time values
while not finish:
min_idx = np.argmin([times_list[idx][counter[idx]] for idx in range(number_exp)])
counter[min_idx] += 1
for idx in range(number_exp):
new_times[idx].append(times_list[min_idx][counter[min_idx] - 1])
new_trajectories[idx].append(trace_list[idx][counter[idx] - 1])
# Check if we're finished
for i in range(number_exp):
finish = True
if counter[i] < len(trace_list[i]) - 1:
finish = False
break
times = new_times
trajectories = new_trajectories
tmp_times = list()
# Sanitize lists and delete double entries
for i in range(number_exp):
tmp_times = list()
tmp_traj = list()
for t in range(len(times[i]) - 1):
if times[i][t+1] != times[i][t] and not np.isnan(times[i][t]):
tmp_times.append(times[i][t])
tmp_traj.append(trajectories[i][t])
tmp_times.append(times[i][-1])
tmp_traj.append(trajectories[i][-1])
times[i] = tmp_times
trajectories[i] = tmp_traj
# We need only one list for all times
times = tmp_times
return trajectories, times
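# Usage sketch (hypothetical data, added comment): two runs with different
# numbers of evaluations are aligned onto one shared time grid before plotting:
#
#   traces = [[0.9, 0.5, 0.4], [0.8, 0.6]]
#   stamps = [[10., 20., 30.], [15., 25.]]
#   trajectories, times = fill_trajectories(traces, stamps)
#   # afterwards every trajectory holds one value per timestamp in `times`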
def main(pkl_list, name_list, autofill, optimum=0, save="", title="",
log=False, y_min=0, y_max=0, scale_std=1, cut=sys.maxint, xmax=10, every=10):
trial_list = list()
times_list = list()
for i in range(len(pkl_list)):
tmp_trial_list = list()
tmp_times_list = list()
for pkl in pkl_list[i]:
fh = open(pkl, "r")
trials = cPickle.load(fh)
fh.close()
trace = plot_util.extract_trajectory(trials)
times = plot_util.extract_runtime_timestamps(trials)
tmp_times_list.append(times)
tmp_trial_list.append(trace)
# We feed this function with two lists of lists and get one list of lists and one list
tmp_trial_list, tmp_times_list = fill_trajectories(tmp_trial_list, tmp_times_list)
trial_list.append(tmp_trial_list)
times_list.append(tmp_times_list)
for i in range(len(trial_list)):
max_len = max([len(ls) for ls in trial_list[i]])
for t in range(len(trial_list[i])):
if len(trial_list[i][t]) < max_len and autofill:
diff = max_len - len(trial_list[i][t])
# noinspection PyUnusedLocal
trial_list[i][t] = np.append(trial_list[i][t], [trial_list[i][t][-1] for x in range(diff)])
elif len(trial_list[i][t]) < max_len and not autofill:
raise ValueError("(%s != %s), Traces do not have the same length, please use -a" %
(str(max_len), str(len(trial_list[i][t]))))
plot_optimization_trace(trial_list, name_list, times_list, optimum, title=title, log=log,
save=save, y_min=y_min, y_max=y_max, scale_std=scale_std, xmax=xmax, every=every)
if save != "":
sys.stdout.write("Saved plot to " + save + "\n")
else:
sys.stdout.write("..Done\n")
if __name__ == "__main__":
prog = "python plotTraceWithStd.py WhatIsThis <oneOrMorePickles> [WhatIsThis <oneOrMorePickles>]"
description = "Plot a Trace with std for multiple experiments"
parser = ArgumentParser(description=description, prog=prog)
# Options for specific benchmarks
parser.add_argument("-o", "--optimum", type=float, dest="optimum",
default=0, help="If not set, the optimum is supposed to be zero")
# Options which are available only for this plot
parser.add_argument("-a", "--autofill", action="store_true", dest="autofill",
default=False, help="Fill trace automatically")
parser.add_argument("-c", "--scale", type=float, dest="scale",
default=1, help="Multiply std to get a nicer plot")
# General Options
parser.add_argument("-l", "--log", action="store_true", dest="log",
default=False, help="Plot on log scale")
parser.add_argument("--max", dest="max", type=float,
default=0, help="Maximum of the plot")
parser.add_argument("--min", dest="min", type=float,
default=0, help="Minimum of the plot")
parser.add_argument("-s", "--save", dest="save",
default="", help="Where to save plot instead of showing it?")
parser.add_argument("-t", "--title", dest="title",
default="", help="Optional supertitle for plot")
parser.add_argument('--xmax', dest='xmax', type=float,
default=10, help='Maximum of the x axis plot')
parser.add_argument('--fontsize', dest='fontsize', type=float,
default=20, help='Font size of the plot')
parser.add_argument('--every', dest='every', type=int,
default=10, help='plot points every X times')
args, unknown = parser.parse_known_args()
sys.stdout.write("\nFound " + str(len(unknown)) + " arguments\n")
pkl_list_main, name_list_main = plot_util.get_pkl_and_name_list(unknown)
matplotlib.rcParams.update({'font.size': args.fontsize})
main(pkl_list_main, name_list_main, autofill=args.autofill, optimum=args.optimum, save=args.save,
title=args.title, log=args.log, y_min=args.min, y_max=args.max, scale_std=args.scale, xmax=args.xmax, every=args.every)
|
gpl-3.0
|
xavierwu/scikit-learn
|
sklearn/cluster/tests/test_birch.py
|
342
|
5603
|
"""
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that each leaf subcluster has a radius no greater than the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
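# Minimal usage sketch mirroring the tests above (illustrative comment only):
#
#   X, _ = make_blobs(n_samples=100, centers=3)
#   brc = Birch(n_clusters=3)
#   brc.fit(X)
#   labels = brc.labels_          # cluster label of each training sample
#   new_labels = brc.predict(X)   # label of the nearest subcluster centroid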
|
bsd-3-clause
|
Xbar/PhyloAnalysis
|
phylo.py
|
1
|
11392
|
#!/usr/bin/env python
import os
import os.path, time
from os import listdir
from os.path import join
import pandas as pd
import numpy as np
import math
from scipy.spatial.distance import pdist, squareform
from scipy.cluster.hierarchy import linkage, dendrogram, fcluster
import scipy.stats as stats
import fastcluster
class phylo_score:
def __init__(self, datafile, host_organ, notefile):
self.datafile = datafile
self.blast_data = pd.read_csv(datafile, index_col=0)
self.host_organ = host_organ
self.annotation = gene_note(notefile)
self.preprocess()
self.hclust()
def preprocess(self, homology_cutoff=50, conserve_cutoff=5):
self.blast_data[self.blast_data < homology_cutoff] = 1
conserved = (self.blast_data>=50).sum(1)
self.normalized = self.blast_data[conserved > conserve_cutoff]
self.normalized = self.normalized.div(self.normalized[self.host_organ], axis=0)
self.normalized = np.log(self.normalized)/np.log(2)
self.normalized = self.normalized.drop(self.host_organ, 1)
self.normalized = self.normalized.add(-self.normalized.mean(axis=0), axis=1)
self.normalized = self.normalized.div(self.normalized.std(axis=0), axis=1)
def hclust(self):
link_file = self.datafile + '.link.npy'
if os.path.isfile(link_file) and os.path.getmtime(link_file) >= os.path.getmtime(self.datafile):
self.link_matrix = np.load(link_file)
else:
blast_score = self.normalized.as_matrix()
self.link_matrix = fastcluster.linkage(blast_score, method='average',
metric='correlation',
preserve_input=False)
del blast_score
np.save(link_file, self.link_matrix)
self.gene_num = self.normalized.shape[0]
self.node_num = self.gene_num + self.link_matrix.shape[0]
self.parent_tree = np.array(np.arange(self.node_num))
self.leaf_num = np.array([1] * self.gene_num +
[0] * (self.node_num - self.gene_num))
for i in range(self.link_matrix.shape[0]):
assert(self.parent_tree[self.link_matrix[i, 0]] == int(self.link_matrix[i, 0]))
assert(self.parent_tree[self.link_matrix[i, 1]] == int(self.link_matrix[i, 1]))
assert(self.leaf_num[self.gene_num + i] == 0)
self.parent_tree[self.link_matrix[i, 0]] = self.gene_num + i
self.parent_tree[self.link_matrix[i, 1]] = self.gene_num + i
self.leaf_num[i + self.gene_num] = self.leaf_num[self.link_matrix[i, 0]] + \
self.leaf_num[self.link_matrix[i, 1]]
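    # Illustrative note (added comment, assuming the usual scipy/fastcluster
    # linkage convention): with g genes (leaves 0..g-1), row i of link_matrix
    # merges two existing nodes into internal node g+i; parent_tree therefore
    # maps every node to the merge it participates in, and leaf_num[g+i] counts
    # the genes below that merge (e.g. with 3 leaves the root, node 4, has
    # leaf_num == 3).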
def cluster_analysis(self, genes, outfile, cluster_cutoff=2):
gene_idx = self.genes_to_idx(genes)
dist_matrix_file = self.datafile + '.npdist.npy'
if os.path.isfile(dist_matrix_file) and os.path.getmtime(dist_matrix_file) >= os.path.getmtime(self.datafile):
dist_matrix = np.load(dist_matrix_file)
else:
if not hasattr(self, 'corr_matrix'):
corr_file = self.datafile + '.corr.npy'
if os.path.isfile(corr_file) and os.path.getmtime(corr_file) >= os.path.getmtime(self.datafile):
self.corr_matrix = np.load(corr_file)
else:
self.corr_matrix = np.corrcoef(self.normalized.as_matrix())
np.save(corr_file, self.corr_matrix)
top_item_num = 50
temp = self.corr_matrix.argsort(axis=1)
rank = temp.argsort(axis=1)
dist_matrix = top_item_num + rank - self.gene_num + 1
dist_matrix[dist_matrix < 0 ] = 0
temp = np.transpose(dist_matrix) * dist_matrix
            # keep the local matrix, the attribute and the on-disk cache in
            # sync so that a fresh run and a cached run cluster the same matrix
            dist_matrix = np.sqrt(temp)
            self.dist_matrix = dist_matrix
            np.save(self.datafile + '.npdist.npy', self.dist_matrix)
print "distance calculated..."
linkage_corr = fastcluster.linkage(dist_matrix, 'weighted', 'euclidean')
clusters = fcluster(linkage_corr, cluster_cutoff, criterion='distance')
np.save(self.datafile + '.npcluster.npy', clusters)
print "clusters generated..."
pvalues = np.array([1] * len(clusters))
goi_clusters = set(clusters[gene_idx])
significant_clusters = []
for goi_cluster in goi_clusters:
cluster_size = sum(clusters == goi_cluster)
intersect_size = sum(clusters[gene_idx] == goi_cluster)
pvalue = stats.hypergeom.sf(intersect_size, intersect_size + self.gene_num - cluster_size,
len(gene_idx), cluster_size)
pvalues[np.where(clusters == goi_cluster)] = pvalue
self.cluster_result = self.normalized.iloc[:, :1]
self.cluster_result.iloc[:, 0] = clusters
idx_in_cluster = [ i for i in range(len(clusters)) if pvalues[i] < 0.05 ]
self.cluster_result = self.cluster_result.iloc[idx_in_cluster, :]
self.cluster_result['gene'] = self.annotation.get_gene(self.cluster_result.index)
self.cluster_result['description'] = self.annotation.get_description(self.cluster_result.index)
self.cluster_result.columns = ['cluster', 'gene', 'description']
self.cluster_result.to_csv(outfile)
def common_ancestor(self, genes):
path = self.path_top(genes[0])
# print "Depth of tree {}".format(len(path))
for gene in genes:
path_new = self.path_top(gene)
path = [ x for x in path if x in path_new ]
# print "Depth: {}".format(len(path))
return path[0]
def is_child(self, child, parent):
pos = child
while (pos != self.parent_tree[pos] and pos != parent):
pos = self.parent_tree[pos]
if pos == parent:
return True
return False
def get_children(self, node):
# Cannot use recursion. Manage own queue
node_list = [node]
children_list = []
while len(node_list) > 0:
current_node = node_list.pop(0)
if current_node < self.gene_num:
children_list.append(current_node)
else:
node_list += [int(self.link_matrix[current_node - self.gene_num, 0]),
int(self.link_matrix[current_node - self.gene_num, 1])]
return children_list
def genes_to_idx(self, genes):
gene = genes[0]
        if isinstance(gene, (int, long, float, complex)):
genes_idx = genes
elif gene.startswith('ENS'):
idx_list = np.arange(self.gene_num)
genes_idx = [idx_list[np.where(self.normalized.index == x)] for x in genes]
genes_idx = [x[0] for x in genes_idx]
else:
genes_list = self.annotation.get_id(genes)
idx_list = np.arange(self.gene_num)
genes_idx = [idx_list[np.where(self.normalized.index == x)] for x in genes_list]
genes_idx = [x[0] for x in genes_idx if len(x) > 0 ]
return genes_idx
def wrapper_top_correlated_genes(self, gene, outfile, num_hit=50):
gene_idx = self.genes_to_idx([gene])
if not hasattr(self, 'corr_matrix'):
corr_file = self.datafile + '.corr.npy'
            if os.path.isfile(corr_file) and os.path.getmtime(corr_file) >= os.path.getmtime(self.datafile):
self.corr_matrix = np.load(corr_file)
else:
corr_matrix = pdist(self.normalized, metric='correlation')
self.corr_matrix = squareform(corr_matrix)
del corr_matrix
np.save(corr_file, self.corr_matrix)
gene_corr = self.corr_matrix[gene_idx][0]
deco_gene = [ (x, i) for i, x in enumerate(gene_corr) ]
deco_gene.sort()
gene_idx = [ i for (x, i) in deco_gene ]
gene_list = pd.DataFrame(self.normalized.index[gene_idx[:num_hit]])
gene_list.columns = ['stable_id']
gene_list = gene_list.set_index(['stable_id'])
gene_list['gene'] = self.annotation.get_gene(gene_list.index)
gene_list['description'] = self.annotation.get_description(gene_list.index)
gene_list.to_csv(outfile)
def wrapper_cluster_gene_names(self, genes):
genes_idx = self.genes_to_idx(genes)
ancestor_node = self.common_ancestor(genes_idx)
gene_nodes = self.get_children(ancestor_node)
stable_ids = self.normalized.index[gene_nodes]
gene_names = self.annotation.get_gene(stable_ids)
return gene_names
def path_top(self, gene):
path_list = [gene]
pos = gene
while (pos != self.parent_tree[pos]):
pos = self.parent_tree[pos]
path_list.append(pos)
return path_list
def mrs_score(self, gene, ref):
max_mrs = 0.0
max_leafgroup = []
for i in range(len(ref)):
common_ancestor = self.common_ancestor([gene, ref[i]])
leaf_nodes = self.get_children(common_ancestor)
gene_of_interest = 0
for ref_gene in ref:
# if self.is_child(ref_gene, common_ancestor):
if ref_gene in leaf_nodes:
gene_of_interest += 1
mrs = 1.0 * gene_of_interest / self.leaf_num[common_ancestor]
if mrs > max_mrs:
max_mrs = mrs
max_leafgroup = leaf_nodes
return (max_mrs, max_leafgroup)
def wrapper_get_mrs(self, ref_genes, out_file):
self.mrs_score_array = np.array([0.0] * self.gene_num)
self.mrs_group_array = [''] * self.gene_num
ref_idx = self.genes_to_idx(ref_genes)
for i in range(self.gene_num):
(self.mrs_score_array[i], mrs_group) = self.mrs_score(i, ref_idx)
self.mrs_group_array[i] = str(mrs_group)
self.mrs_score_frame = self.normalized.iloc[:, :1]
self.mrs_score_frame.iloc[:, 0] = self.mrs_score_array
self.mrs_score_frame['gene'] = self.annotation.get_gene(self.mrs_score_frame.index)
self.mrs_score_frame['description'] = self.annotation.get_description(self.mrs_score_frame.index)
self.mrs_score_frame.columns = ['mrs','gene','description']
self.mrs_score_frame['group'] = self.mrs_group_array
self.mrs_score_frame.to_csv(out_file)
class gene_note:
def __init__(self, datafile):
self.gene_notes = pd.read_csv(datafile, header=None, index_col=0, sep='\t',
names=['gene', 'xref', 'description'])
def get_id(self, gene):
if isinstance(gene, (list, tuple)):
return [self.gene_notes.index[np.where(self.gene_notes['gene']==x)] for x in gene]
else:
return self.gene_notes.index[np.where(self.gene_notes['gene']==gene)]
def get_gene(self, stable_id):
return self.gene_notes.loc[stable_id, 'gene']
def get_description(self, stable_id):
return self.gene_notes.loc[stable_id, 'description']
def get_external_ref(self, stable_id):
return self.gene_notes.loc[stable_id, 'xref']
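# Usage sketch (hypothetical file names and gene symbols, added comment):
#
#   scorer = phylo_score('blast_scores.csv', 'homo_sapiens', 'gene_notes.tsv')
#   scorer.wrapper_top_correlated_genes('TP53', 'tp53_neighbors.csv')
#   scorer.wrapper_get_mrs(['TP53', 'MDM2'], 'mrs_scores.csv')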
|
gpl-2.0
|
jjx02230808/project0223
|
sklearn/cross_decomposition/tests/test_pls.py
|
23
|
14318
|
import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true,
assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_, CCA
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
pls_bysvd.x_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
pls_bysvd.y_loadings_, decimal=5,
err_msg="nipals and svd implementations lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
# x_weights_sign_flip holds columns of 1 or -1, depending on sign flip
# between R and python
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
x_rotations_sign_flip = pls_ca.x_rotations_ / x_rotations
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
y_rotations_sign_flip = pls_ca.y_rotations_ / y_rotations
# x_weights = X.dot(x_rotation)
# Hence R/python sign flip should be the same in x_weight and x_rotation
assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
    # This tests that R / python give the same result up to column
# sign indeterminacy
assert_array_almost_equal(np.abs(x_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)
assert_array_almost_equal(np.abs(y_rotations_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
x_weights_sign_flip = pls_2.x_weights_ / x_weights
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
x_loadings_sign_flip = pls_2.x_loadings_ / x_loadings
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_weights_sign_flip = pls_2.y_weights_ / y_weights
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
y_loadings_sign_flip = pls_2.y_loadings_ / y_loadings
# x_loadings[:, i] = Xi.dot(x_weights[:, i]) \forall i
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
x_weights_sign_flip = pls_ca.x_weights_ / x_weights
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
x_loadings_sign_flip = pls_ca.x_loadings_ / x_loadings
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
y_weights_sign_flip = pls_ca.y_weights_ / y_weights
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
y_loadings_sign_flip = pls_ca.y_loadings_ / y_loadings
assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(x_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(x_loadings_sign_flip), 1, 4)
assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip, 4)
assert_array_almost_equal(np.abs(y_weights_sign_flip), 1, 4)
assert_array_almost_equal(np.abs(y_loadings_sign_flip), 1, 4)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
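# Minimal PLSCanonical usage sketch drawn from the assertions above
# (illustrative comment only; X and Y are the two data blocks):
#
#   plsca = pls_.PLSCanonical(n_components=2)
#   plsca.fit(X, Y)
#   X_scores, Y_scores = plsca.transform(X, Y)   # latent scores of both blocks
#   # X is approximated by x_scores_.dot(x_loadings_.T), Y by y_scores_.dot(y_loadings_.T)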
def test_PLSSVD():
    # Let's check that PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
# check that copy doesn't destroy
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale_and_stability():
# We test scale=True parameter
# This allows to check numerical stability over platforms as well
d = load_linnerud()
X1 = d.data
Y1 = d.target
# causes X[:, -1].std() to be zero
X1[:, -1] = 1.0
# From bug #2821
# Test with X2, T2 s.t. clf.x_score[:, 1] == 0, clf.y_score[:, 1] == 0
# This test robustness of algorithm when dealing with value close to 0
X2 = np.array([[0., 0., 1.],
[1., 0., 0.],
[2., 2., 2.],
[3., 5., 4.]])
Y2 = np.array([[0.1, -0.2],
[0.9, 1.1],
[6.2, 5.9],
[11.9, 12.3]])
for (X, Y) in [(X1, Y1), (X2, Y2)]:
X_std = X.std(axis=0, ddof=1)
X_std[X_std == 0] = 1
Y_std = Y.std(axis=0, ddof=1)
Y_std[Y_std == 0] = 1
X_s = (X - X.mean(axis=0)) / X_std
Y_s = (Y - Y.mean(axis=0)) / Y_std
for clf in [CCA(), pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X, Y)
clf.set_params(scale=False)
X_s_score, Y_s_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
# Scaling should be idempotent
clf.set_params(scale=True)
X_score, Y_score = clf.fit_transform(X_s, Y_s)
assert_array_almost_equal(X_s_score, X_score)
assert_array_almost_equal(Y_s_score, Y_score)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
|
bsd-3-clause
|
boland1992/SeisSuite
|
build/lib/seissuite/spacing/search_station.py
|
2
|
31188
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 3 08:44:50 2015
@author: boland
CODE DESCRIPTION:
The following python script searches for M new random points atop N fixed
station points. The tests performed concern the point-density distribution of
points representing all physically possible great-circle paths
between seismic stations. An extension is to select
low point-density regions as a new cluster in which to search for new station points.
"""
#------------------------------------------------------------------------------
# MODULES
#------------------------------------------------------------------------------
import os
#import fiona
import pysal
import pickle
import pyproj
import datetime
import itertools
import shapefile
import numpy as np
import datetime as dt
import multiprocessing as mp
import matplotlib.pyplot as plt
from math import sqrt, atan2, asin, degrees, radians, tan, sin, cos
from shapely.geometry import asPolygon, Polygon
from descartes.patch import PolygonPatch
from matplotlib.colors import LogNorm
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
from shapely import geometry
#------------------------------------------------------------------------------
# VARIABLES
#------------------------------------------------------------------------------
# Reference elipsoid to calculate distance.
wgs84 = pyproj.Geod(ellps='WGS84')
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class InShape:
"""
Class defined in order to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. This class uses
the shapely module.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def shape_poly(self):
#with fiona.open(self.boundary) as fiona_collection:
        # In this case, we'll assume the shapefile only has one layer
# shapefile_record = fiona_collection.next()
# Use Shapely to create the polygon
# self.polygon = geometry.asShape( shapefile_record['geometry'] )
# return self.polygon
# Now, open the shapefile using pysal's FileIO
shps = pysal.open(self.boundary , 'r')
poly = shps.next()
self.polygon = geometry.asShape(poly)
return self.polygon
def point_check(self, coord):
"""
Function that takes a single (2,1) shape input, converts the points
into a shapely.geometry.Point object and then checks if the coord
is contained within the shapefile.
"""
self.polygon = self.shape_poly()
point = geometry.Point(coord[0], coord[1])
if self.polygon.contains(point):
return coord
def shape_bounds(self):
"""
        Function that returns the bounding box coordinates of the polygon as (minx, miny, maxx, maxy)
"""
self.polygon = self.shape_poly()
return self.polygon.bounds
def shape_buffer(self, shape=None, size=1., res=1):
"""
Function that returns a new polygon of the larger buffered points.
Can import polygon into function if desired. Default is
self.shape_poly()
"""
        if shape is None:
            shape = self.shape_poly()
            self.polygon = shape
        return asPolygon(shape.buffer(size, resolution=res).exterior)
def extract_poly_coords(self, poly):
if poly.type == 'Polygon':
exterior_coords = poly.exterior.coords[:]
elif poly.type == 'MultiPolygon':
exterior_coords = []
for part in poly:
epc = np.asarray(self.extract_poly_coords(part)) # Recursive call
exterior_coords.append(epc)
else:
raise ValueError('Unhandled geometry type: ' + repr(poly.type))
return np.vstack(exterior_coords)
def external_coords(self, shape=None, buff=None, size=1., res=1):
"""
Function that returns the external coords of a buffered shapely
polygon. Note that shape variable input
MUST be a shapely Polygon object.
"""
if shape is not None and buff is not None:
poly = self.shape_buffer(shape=shape, size=size, res=res)
elif shape is not None:
poly = shape
else:
poly = self.shape_poly()
exterior_coords = self.extract_poly_coords(poly)
return exterior_coords
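# Illustrative sketch (not part of the original module): minimal usage of the
# InShape class above, assuming a hypothetical shapefile path. It builds the
# boundary polygon, reads its bounding box and checks whether the box centre
# lies inside the boundary.
def _demo_inshape(shapefile_path="/path/to/boundary.shp"):
    """Demonstrate the InShape workflow (shapefile path is hypothetical)."""
    shape = InShape(shapefile_path)
    minx, miny, maxx, maxy = shape.shape_bounds()
    centre = ((minx + maxx) / 2., (miny + maxy) / 2.)
    # point_check returns the coordinate if it falls inside the polygon
    return shape.point_check(centre)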
class InPoly:
"""
Class defined in order to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. The class uses
the matplotlib Path class.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise boundary polygon nodes
self.nodes = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def poly_nodes(self):
"""
Function that returns the nodes of a shapefile as a (n,2) array.
"""
sf = shapefile.Reader(self.boundary)
poly = sf.shapes()[0]
#find polygon nodes lat lons
self.nodes = np.asarray(poly.points)
return self.nodes
def points_from_path(self, poly):
"""
Function that returns nodes from matplotlib Path object.
"""
return poly.vertices
def shapefile_poly(self):
"""
Function that imports a shapefile location path and returns
a matplotlib Path object representing this shape.
"""
self.nodes = self.poly_nodes()
#convert to a matplotlib path class!
self.polygon = Path(self.nodes)
return self.polygon
def node_poly(self, nodes):
"""
Function creates a matplotlib Path object from input nodes.
"""
#convert to a matplotlib path class!
polygon = Path(nodes)
return polygon
def points_in_shapefile_poly(self):
"""
        Function that checks all (N,2) coordinates stored in self.dots with
        matplotlib Path.contains_points() and returns only those points that
        fall inside the shapefile polygon.
"""
self.polygon = self.shapefile_poly()
points_in = self.polygon.contains_points(self.dots)
self.output = self.dots[points_in == True]
return np.asarray(self.output)
def points_in(self, points, poly=None, IN=True):
"""
        Function that takes many (N,2) points and uses matplotlib
        Path.contains_points() to check which of them lie in the polygon.
        If IN=True the function returns the points inside the matplotlib
        Path object; if IN=False it returns the points outside it.
"""
if poly is None:
poly = self.shapefile_poly()
points_test = poly.contains_points(points)
output = points[points_test == IN]
return np.asarray(output)
def bounds_poly(self, nodes=None):
"""
Function that returns boundaries of a shapefile polygon.
"""
if nodes is None:
nodes = self.poly_nodes()
xmin, xmax = np.min(nodes[:,0]), np.max(nodes[:,0])
ymin, ymax = np.min(nodes[:,1]), np.max(nodes[:,1])
return xmin, xmax, ymin, ymax
def poly_from_shape(self, shape=None, size=1., res=1):
"""
Function that returns a matplotlib Path object from
buffered shape points. if shape != None then the shape input
MUST be of type shapely polygon.
"""
SHAPE = InShape(self.boundary)
if shape is None:
# Generates shape object from shape_file input
shape = SHAPE
return self.node_poly(shape.external_coords(size=size, res=res))
else:
return self.node_poly(SHAPE.external_coords(shape=shape))
def rand_poly(self, poly=None, N=1e4, IN=True):
"""
Function that takes an input matplotlib Path object (or the default)
and generates N random points within the bounding box around it.
Then M unknown points are returned that ARE contained within the
Path object. This is done for speed. If IN=True then the function
will return points inside the matplotlib Path object, else if
IN=False then the function will return the points outside the
matplotlib Path object.
"""
if poly is None:
#poly = self.shapefile_poly()
xmin, xmax, ymin, ymax = self.bounds_poly()
else:
nodes = self.points_from_path(poly)
xmin, xmax, ymin, ymax = self.bounds_poly(nodes=nodes)
        N = int(N)
        X = abs(xmax - xmin) * np.random.rand(N, 1) + xmin
        Y = abs(ymax - ymin) * np.random.rand(N, 1) + ymin
many_points = np.column_stack((X,Y))
many_points = self.points_in(many_points, poly=poly, IN=IN)
return many_points
def rand_shape(self, shape=None, N=1e4, IN=True):
"""
Function that takes an input shapely Polygon object (or the default)
and generates N random points within the bounding box around it.
Then M unknown points are returned that ARE contained within the
Polygon object. This is done for speed. If IN=True then the function
will return points inside the matplotlib Path object, else
if IN=False then the function will return the points outside
the matplotlib Path object.
"""
if shape is None:
# Generates shape object from shape_file input
INSHAPE = InShape(self.boundary)
shape = self.node_poly(INSHAPE.external_coords())
xmin, xmax, ymin, ymax = INSHAPE.shape_bounds()
poly = self.node_poly(shape.external_coords(shape=shape))
points = self.rand_poly(poly=poly, N=N, IN=IN)
return points
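# Illustrative sketch (not part of the original module): draw random points
# inside a shapefile boundary with the InPoly class above, assuming a
# hypothetical shapefile path.
def _demo_inpoly(shapefile_path="/path/to/boundary.shp", n_points=1000):
    """Demonstrate InPoly.rand_poly() (shapefile path is hypothetical)."""
    inpoly = InPoly(shapefile_path)
    # n_points random points are drawn in the bounding box; only the ones
    # contained in the polygon are returned
    return inpoly.rand_poly(N=n_points, IN=True)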
class Convex:
"""
CLASS CURRENTLY NOT WORKING!
Class defined in order to create a convex hull around an array of points
and then perform functions on them e.g. produce K random points inside,
find N equidistant points within etc.
"""
def __init__(self, points):
# initialise points of interest
self.dots = points
# initialise polygon for potential convex hull with matplotlib Path
self.polygon = 0.
# initialise output points for points inside convex hull!
self.output = 0.
def convex_hull(self, point_set):
"""
Function to produce a convex hull object that surrounds
a set of points. The input must be of the type Nx2 matrix/
numpy array or equivalent. new_point shape is (2,1)
"""
return ConvexHull(point_set)
def poly_hull(self):
"""
Function that generates a matplotlib Path object from convex hull
nodes.
"""
hull = self.convex_hull(self.dots)
X, Y = self.dots[hull.vertices,0], self.dots[hull.vertices,1]
self.polygon = Path(np.column_stack((X, Y)))
return self.polygon
def in_poly_hull(self, point_set):
"""
Function that quickly returns (2,N) array from (2,M) array of
input points such that M >= N and N points are contained within
the self.polygon polygon.
"""
self.polygon = self.poly_hull()
points_in = self.polygon.contains_points(point_set)
self.output = point_set[points_in == True]
return self.output
def plot_hull(self, show_points=False):
"""
Function that plots the boundaries of a convex hull using
matplotlib.pyplot. Input hull must be of type:
scipy.spatial.qhull.ConvexHull
points input must be of the original coordinates.
"""
hull = self.convex_hull(self.dots)
plt.figure()
for simplex in hull.simplices:
plt.plot(self.dots[simplex,0], \
self.dots[simplex,1], 'k-')
if show_points:
plt.scatter(self.dots[:,0], \
self.dots[:,1], s=10,c='g')
plt.scatter(self.dots[:,0], \
self.dots[:,1], s=30,c='orange')
plt.show()
def rand_hull(hull, points, K):
"Generate K new random points contained within a convex hull"
minx, maxx = np.min(points[:,0]), np.max(points[:,0])
miny, maxy = np.min(points[:,1]), np.max(points[:,1])
X = abs(maxx - minx) * np.random.rand(10*K**2,1) + minx
Y = abs(maxy - miny) * np.random.rand(10*K**2,1) + miny
new_coords = np.column_stack((X,Y))
return new_coords
class Geodesic:
"""
    Class defined in order to process points, distances and other related
    geodesic calculations and functions.
"""
def __init__(self, period_range=[1, 40], km_point=20., max_dist=2e3):
# initialise period_range as [1,40] default for ambient noise
self.per_range = period_range
self.km = km_point
self.max_dist = max_dist
def remove_distance(self, period_range, max_dist=None):
"""
        Function that returns the range of distances (in km) over which
        ambient-noise structure can plausibly be resolved, given the period
        range available to the study. The default maximum distance can be
        reassigned based on the cut-off found in the time-lag plots for
        your study.
"""
if max_dist is None:
max_dist = self.max_dist
        if isinstance(period_range, list):
            min_dist = min(period_range) * 9
            return [min_dist, max_dist]
        elif isinstance(period_range, (int, float)):
            return [period_range * 9, max_dist]
def haversine(self, lon1, lat1, lon2, lat2, R=6371):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees). R is radius of
spherical earth. Default is 6371km.
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = R * c
return km
def fast_geodesic(self, lon1, lat1, lon2, lat2, npts):
"""
        Returns a list of *npts* points along the geodesic between
        (and including) (*lon1*, *lat1*) and (*lon2*, *lat2*), in an array
        of shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=lon1, lat1=lat1,
lon2=lon2, lat2=lat2,
npts=npts-2)
return np.array([[lon1,lat1]] + path + [[lon2,lat2]])
def paths_calc(self, path_info, km_points=None, per_lims=None):
"""
        Function that returns an array of coordinates equally spaced along
        the great-circle path between two lat-lon coordinates, provided the
        path length lies within a certain distance range; otherwise an
        array of zeros of the same size is returned. The spacing per point
        defaults to the km_point value given at initialisation.
"""
if per_lims is None:
# if no new default for period limits is defined, then set the
# limit to the default.
per_lims = self.per_range
if km_points is None:
km_points = self.km
lon1, lat1, lon2, lat2 = path_info[0], \
path_info[1], path_info[2], path_info[3]
        # interpoint spacing of roughly km_points km, and nb of points >= 100
dist = self.haversine(lon1, lat1, lon2, lat2)
npts = max(int((np.ceil(dist) + 1) / km_points), 100)
path = self.fast_geodesic(lon1, lat1, lon2, lat2, npts)
dist_range = self.remove_distance(per_lims)
if min(dist_range) < dist < max(dist_range):
#remove the closest points along this line that fall below the distance
#find the index of the first point that is above this distance away!
pts_km = npts / float((np.ceil(dist) + 1)) #this gives pts/km
#remove all points below this index in the paths list
            dist_index = int(pts_km * min(dist_range))
            path = path[dist_index:]
return path
else:
return np.zeros_like(path)
def fast_paths(self, coord_list):
"""
Function that takes many point coordinate combinations and quickly
passes them through the paths_calc function. coord_list MUST be
of the shape (4, N) whereby each coordinate combination is in a
(4,1) row [lon1,lat1,lon2,lat2].
"""
return map(self.paths_calc, coord_list)
def combine_paths(self, paths):
"""
Function that takes many paths (should be array of same length as
number of stations). This is automatically generated by parallelising
the fast_paths function above.
The output array should only contain unique, no repeating paths
and should be of the shape (2,N) where N is a large number of coords.
"""
#create a flattened numpy array of size 2xN from the paths created!
paths = list(itertools.chain(*paths))
paths = np.asarray(list(itertools.chain\
(*paths)))
#keep all but the repeated coordinates by keeping only unique whole rows!
b = np.ascontiguousarray(paths).view(np.dtype\
((np.void, paths.dtype.itemsize * \
paths.shape[1])))
_, idx = np.unique(b, return_index=True)
paths = np.unique(b).view(paths.dtype)\
.reshape(-1, paths.shape[1])
return paths
def remove_zeros(self, paths):
"""
Function that processes the flattened path output from combine_paths
and removes the zero paths created by paths_calc. Remove zeroes
from paths to ensure all paths that were NOT in the distance threshold
are removed from the path density calculation!
"""
path_lons, path_lats = paths[:,0], paths[:,1]
FIND_ZERO1 = np.where(paths[:,0]==0)[0]
FIND_ZERO2 = np.where(paths[:,1]==0)[0]
if len(FIND_ZERO1) != 0 and len(FIND_ZERO2) != 0:
path_lons = np.delete(path_lons, FIND_ZERO1)
path_lats = np.delete(path_lats, FIND_ZERO2)
return np.column_stack((path_lons, path_lats))
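# Illustrative sketch (not part of the original module): compute the
# great-circle distance between two hypothetical station coordinates and
# sample points along the connecting path with the Geodesic class above.
def _demo_geodesic(lon1=144.96, lat1=-37.81, lon2=151.21, lat2=-33.87):
    """Demonstrate haversine() and paths_calc() for one station pair."""
    geod = Geodesic(period_range=[1, 40], km_point=20., max_dist=2e3)
    dist_km = geod.haversine(lon1, lat1, lon2, lat2)
    # paths_calc returns zeros if the distance is outside the resolvable range
    path = geod.paths_calc((lon1, lat1, lon2, lat2))
    return dist_km, path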
class Coordinates:
"""
    Class defined in order to perform operations on latitude-longitude
    coordinates.
"""
def __init__(self, input_list=None, N=None):
# initialise input list of a (2,N) numpy array
self.input_list = input_list
# initialise import number
self.N = N
def del_N(self, N=None, inputs=None):
"""
        Function that deletes the last N coordinates from a list of coordinates.
"""
if N is None:
if self.N is not None:
N = self.N
            else:
                raise ValueError("No number given. Please enter the desired "
                                 "number of points to remove from the input_list!")
if inputs is None:
if self.input_list is not None:
inputs = self.input_list
            else:
                raise ValueError("No input list given. Please enter a list of "
                                 "points from which to remove the last N points!")
        if not isinstance(inputs, list):
            inputs = list(inputs)
del inputs[-N:]
return np.asarray(inputs)
def decluster(self, inputs=None, degree_dist=1., verbose=False):
"""
Function that deletes points that are too close together
given a set degree range and returns only one point to represent
that cluster. Default is one degree distance. Inputs must be (2,N)
lon-lat coordinate arrays/lists.
"""
from sklearn.cluster import DBSCAN
import random
if inputs is None:
if self.input_list is not None:
inputs = self.input_list
            else:
                raise ValueError("No input list given.")
#scan for all points that are within a degree radius of one another!
db = DBSCAN(eps=degree_dist).fit(inputs)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
unique_labels = set(labels)
clusters = []
cluster_keep = []
for k in unique_labels:
if k != -1:
class_member_mask = (labels == k)
cluster = inputs[class_member_mask & core_samples_mask]
# Select only 1 random point from each cluster to keep. Remove all others!
clusters.append(cluster)
cluster_keep.append(cluster[random.randint(0,len(cluster)-1)])
cluster_keep = np.asarray(cluster_keep)
# flatten clusters array
clusters = np.asarray(list(itertools.chain(*clusters)))
# remove all points in clusters from the overall coords array
inputs = np.asarray([point for point in inputs if
point not in clusters])
if verbose:
print "clusters array shape: ", clusters.shape
print "inputs array shape: ", inputs.shape
print "cluster_keep array shape: ",cluster_keep.shape
if len(cluster_keep) > 0:
output_coords = np.append(inputs, cluster_keep, axis=0)
# place single representative point from cluster into coord list
else:
output_coords = inputs
return output_coords
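# Illustrative sketch (not part of the original module): decluster a small
# synthetic set of lon-lat points with the Coordinates class above, so that
# stations packed within one degree are represented by a single random point.
def _demo_decluster():
    """Demonstrate Coordinates.decluster() on synthetic coordinates."""
    tight_cluster = np.column_stack((144.9 + 0.1 * np.random.rand(6),
                                     -37.8 + 0.1 * np.random.rand(6)))
    pts = np.vstack((tight_cluster, [[151.2, -33.9]]))
    coords = Coordinates(input_list=pts)
    return coords.decluster(degree_dist=1.)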
class Density:
"""
    Class defined to perform density field operations, e.g. 2D histograms,
    gradient fields, and averages and standard deviations thereof.
"""
def __init__(self, paths=None, nbins=200):
# initialise path points for shape (2,N) to calculate point density
self.paths = paths
# initialise the number of bins per axis for 2d histogram
self.nbins = nbins
# initialise the density calculation
self.H = 0.
self.H_masked = 0.
# initialise the density gradient field calculation
self.grad = 0.
self.grad_masked = 0.
        # initialise x and y bin-edge arrays of the 2D histogram
self.xedges = 0.
self.yedges = 0.
def hist2d(self, paths=None):
"""
Function that calculates the 2D histogram and generates H, xedges,
and yedges.
"""
if paths is None:
paths = self.paths
self.H, self.xedges, self.yedges = np.histogram2d(paths[:,0],
paths[:,1],
bins=self.nbins)
return self.H, self.xedges, self.yedges
def hgrad(self, H=None):
"""
        Function that calculates the absolute gradient field of the 2D
        histogram H, computing the histogram first if it is not supplied.
"""
#, xedges=None, yedges=None
if H is None:
H, xedges, yedges = self.hist2d()
self.grad = np.abs(np.asarray(np.gradient(H)[0]))
return self.grad
def transform_h(self, H=None):
"""
Function that rotates, flips and masks the H density field
in order for it to be plotted etc.
"""
if H is None:
            if isinstance(self.H, np.ndarray):
                H, xedges, yedges = self.H, self.xedges, self.yedges
            else:
                H, xedges, yedges = self.hist2d()
H = np.rot90(H)
H = np.flipud(H)
self.H_masked = np.ma.masked_where(H==0,H)
return self.H_masked
def transform_grad(self, grad=None):
"""
Function that rotates, flips and masks the H density gradient field
in order for it to be plotted etc.
"""
if grad is None:
grad, xedges, yedges = self.hgrad()
grad = np.rot90(grad)
grad = np.flipud(grad)
self.grad_masked = np.ma.masked_where(grad==0,grad)
return self.grad_masked
def plot_lims(self, paths=None):
if paths is None:
try:
lons, lats = self.paths[:,0], self.paths[:,1]
except Exception as error:
raise error
else:
try:
lons, lats = paths[:,0], paths[:,1]
except Exception as error:
raise error
return np.min(lons), np.max(lons), np.min(lats), np.max(lats)
def select_points(self, perc=0.1, high=None):
"""
        Function that returns the lat-lon coordinates of bins whose point
        density is below perc*np.average(H). If high is not None, bins with
        density above perc*np.average(H) are returned instead. perc=0.1 by
        default.
"""
H = self.H
if high is None:
search = np.where(H<perc*np.average(self.H))
else:
search = np.where(H>perc*np.average(self.H))
xmin, xmax = np.min(self.xedges), np.max(self.xedges)
ymin, ymax = np.min(self.yedges), np.max(self.yedges)
Hdensx, Hdensy = search[1], search[0]
Hdensx = (xmax-xmin)/(self.nbins) * Hdensx + xmin
Hdensy = (ymax-ymin)/(self.nbins) * Hdensy + ymin
return np.column_stack((Hdensx, Hdensy))
def plot_field(self, grad=False, SHAPE=None, swell=0.00):
lonmin, lonmax, latmin, latmax = self.plot_lims()
fig = plt.figure(figsize=(15,10), dpi=100)
plt.xlabel('longitude (degrees)')
plt.ylabel('latitude (degrees)')
plt.xlim(lonmin-swell*abs(lonmax-lonmin),\
lonmax+swell*abs(lonmax-lonmin))
plt.ylim(latmin-swell*abs(latmax-latmin),\
latmax+swell*abs(latmax-latmin))
if not grad:
plt.title("Path Density Distribution")
            if isinstance(self.H_masked, np.ma.MaskedArray):
                H_masked = self.H_masked
            else:
                H_masked = self.transform_h()
plt.pcolor(self.xedges, self.yedges, H_masked, norm=LogNorm(\
vmin=np.min(H_masked), vmax=np.max(H_masked)), cmap='rainbow',\
alpha=0.6, zorder = 3)
col = plt.colorbar()
col.ax.set_ylabel('Points Per Bin')
elif grad:
plt.title("Gradient Path Density Distribution")
            if isinstance(self.grad_masked, np.ma.MaskedArray):
                grad_masked = self.grad_masked
            else:
                raise Exception("grad_masked has not yet been defined. Please "
                                "run the necessary functions, e.g. "
                                "transform_grad, before plotting.")
            plt.pcolor(self.xedges, self.yedges, grad_masked,
                       norm=LogNorm(vmin=np.min(grad_masked),
                                    vmax=np.max(grad_masked)),
                       cmap='rainbow', alpha=0.6, zorder=3)
col = plt.colorbar()
col.ax.set_ylabel('Gradient Points Per Bin')
else:
raise Exception("Either you have not chosen to plot anything OR\n\
both H and grad are inputed and the function doesn't\
know what to do.")
if SHAPE is not None:
patch = PolygonPatch(SHAPE, facecolor='white',\
edgecolor='k', zorder=1)
ax = fig.add_subplot(111)
ax.add_patch(patch)
ax.set_xlim(lonmin-0.05*abs(lonmax-lonmin), \
lonmax+0.05*abs(lonmax-lonmin))
ax.set_ylim(latmin-0.05*abs(latmax-latmin), \
latmax+0.05*abs(latmax-latmin))
#plt.scatter(new_coords[:,0], new_coords[:,1],c='r', s=30)
fig.savefig("plot_density.png")
fig.clf()
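# Illustrative sketch (not part of the original module): the typical Density
# workflow on synthetic path points - build the 2D histogram, mask it for
# plotting and extract the centres of the low point-density bins.
def _demo_density():
    """Demonstrate the hist2d/transform_h/select_points sequence."""
    pts = np.random.uniform(low=140., high=150., size=(1000, 2))
    dens = Density(paths=pts, nbins=50)
    dens.hist2d()
    masked_field = dens.transform_h()
    low_density_coords = dens.select_points(perc=0.1)
    return masked_field, low_density_coords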
# The functions below are used to calculate waypoints along a great-circle path.
def latitude(dist, sigma01, alpha0, lon0):
sigma = sigma01 + dist#/R
lat = degrees(asin(cos(alpha0)*sin(sigma)))
#alpha = atan2(tan(alpha0),cos(sigma))
return lat
def longitude(dist, sigma01, alpha0, lon0):
sigma = sigma01 + dist#/R
lon = degrees(atan2(sin(alpha0)*sin(sigma), cos(sigma))) + degrees(lon0)
#alpha = atan2(tan(alpha0),cos(sigma))
return lon
vlat_func = np.vectorize(latitude)
vlon_func = np.vectorize(longitude)
def waypoint_init(path_info, km=20):
R = 6371
lon1, lat1, lon2, lat2, dist = radians(path_info[0]), \
radians(path_info[1]), radians(path_info[2]), \
radians(path_info[3]), radians(path_info[4])
#lon1, lat1, lon2, lat2, dist = \
#map(radians, [path_info[0],path_info[1],path_info[2],
#path_info[3],path_info[4]])
lon_diff = lon2-lon1
alpha1 = atan2(sin(lon_diff),(cos(lat1)*tan(lat2)-sin(lat1)*cos(lon_diff)))
#alpha2 = atan2(sin(lon_diff),(-cos(lat2)*tan(lat1)+sin(lat2)*cos(lon_diff)))
#try:
#sigma12 = acos(sin(lat1)*sin(lat2)+cos(lat1)*cos(lat2)*cos(lon_diff))
#except:
#return
sigma01, alpha0 = atan2(tan(lat1), cos(alpha1)), asin(sin(alpha1)*cos(lat1))
#sigma02 = sigma01+sigma12
lon01 = atan2(sin(alpha0)*sin(sigma01), cos(sigma01))
lon0 = lon1 - lon01
npts = max(int((np.ceil(dist) + 1)/km), 100)
all_d = np.linspace(0,dist,npts)/R
lons, lats = vlon_func(all_d, sigma01, alpha0, lon0), vlat_func(all_d, sigma01, alpha0, lon0)
return np.column_stack((lons, lats))
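# Illustrative sketch (not part of the original module): generate waypoints
# along the great circle between two hypothetical stations using the
# vectorised helpers above; the km keyword controls the intended spacing.
def _demo_waypoints(lon1=144.96, lat1=-37.81, lon2=151.21, lat2=-33.87):
    """Demonstrate waypoint_init() for a single station pair."""
    dist_km = Geodesic().haversine(lon1, lat1, lon2, lat2)
    return waypoint_init((lon1, lat1, lon2, lat2, dist_km), km=20)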
|
gpl-3.0
|
nhejazi/scikit-learn
|
sklearn/feature_selection/rfe.py
|
7
|
16859
|
# Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..utils.validation import check_is_fitted
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..model_selection._validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
def _rfe_single_fit(rfe, estimator, X, y, train, test, scorer):
"""
Return the score for a fit across one fold.
"""
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
return rfe._fit(
X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer)).scores_
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and the importance of each feature is obtained either through a
``coef_`` attribute or through a ``feature_importances_`` attribute.
    Then, the least important features are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a ``fit`` method that provides
information about feature importance either through a ``coef_``
attribute or through a ``feature_importances_`` attribute.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 most informative
    features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
# Parameter step_score controls the calculation of self.scores_
# step_score is not exposed to users
# and is used when implementing RFECV
# self.scores_ will not be calculated when calling _fit through fit
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features // 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
else:
coefs = getattr(estimator, 'feature_importances_', None)
if coefs is None:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
check_is_fitted(self, 'support_')
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
check_is_fitted(self, 'estimator_')
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
check_is_fitted(self, 'estimator_')
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
check_is_fitted(self, 'estimator_')
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a ``fit`` method that provides
information about feature importance either through a ``coef_``
attribute or through a ``feature_importances_`` attribute.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y``
        is either binary or multiclass,
        :class:`sklearn.model_selection.StratifiedKFold` is used. In all
        other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int, default=0
Controls verbosity of output.
n_jobs : int, default 1
Number of cores to run in parallel while fitting across folds.
Defaults to 1 core. If `n_jobs=-1`, then number of jobs is set
to number of cores.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features,
    not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None, verbose=0,
n_jobs=1):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.verbose = verbose
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
cv = check_cv(self.cv, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, verbose=self.verbose)
# Determine the number of subsets of features by fitting across
# the train folds and choosing the "features_to_select" parameter
# that gives the least averaged error across all folds.
# Note that joblib raises a non-picklable error for bound methods
# even if n_jobs is set to 1 with the default multiprocessing
# backend.
        # This branching is done to make sure that user code that sets n_jobs
        # to 1 and provides bound methods as scorers is not broken by the
        # addition of the n_jobs parameter in version 0.18.
if self.n_jobs == 1:
parallel, func = list, _rfe_single_fit
else:
parallel, func, = Parallel(n_jobs=self.n_jobs), delayed(_rfe_single_fit)
scores = parallel(
func(rfe, self.estimator, X, y, train, test, scorer)
for train, test in cv.split(X, y))
scores = np.sum(scores, axis=0)
n_features_to_select = max(
n_features - (np.argmax(scores) * step),
n_features_to_select)
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select, step=self.step)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.fit(self.transform(X), y)
        # Here the scores are normalized by get_n_splits(X, y), fixing a
        # normalization error where n was get_n_splits(X, y) - 1.
self.grid_scores_ = scores[::-1] / cv.get_n_splits(X, y)
return self
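# Illustrative sketch (not part of scikit-learn): select features with RFECV
# on a synthetic classification problem and inspect the cross-validated score
# obtained for each number of features via ``grid_scores_``.
def _demo_rfecv():
    """Demonstrate RFECV feature selection on synthetic data."""
    from sklearn.datasets import make_classification
    from sklearn.svm import SVC
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=5, random_state=0)
    selector = RFECV(SVC(kernel="linear"), step=1, cv=3)
    selector.fit(X, y)
    return selector.n_features_, selector.grid_scores_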
|
bsd-3-clause
|
allisony/pyspeckit
|
pyspeckit/cubes/mapplot.py
|
1
|
16789
|
"""
MapPlot
-------
Make plots of the cube and interactively connect them to spectrum plotting.
This is really an interactive component of the package; nothing in here is
meant for publication-quality plots, but more for user interactive analysis.
That said, the plotter makes use of `APLpy <https://github.com/aplpy/aplpy>`_,
so it is possible to make publication-quality plots.
:author: Adam Ginsburg
:date: 03/17/2011
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function
import matplotlib
import matplotlib.pyplot
import matplotlib.figure
import numpy as np
import copy
import itertools
from astropy.extern import six
try:
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
pywcsOK = True
except ImportError:
try:
import pyfits
import pywcs
pywcsOK = True
except ImportError:
pywcsOK = False
try:
import aplpy
icanhasaplpy = True
except: # aplpy fails with generic exceptions instead of ImportError
icanhasaplpy = False
from . import cubes
class MapPlotter(object):
"""
    Class to plot a map of a cube and interactively connect it to spectrum plotting.
See `mapplot` for use documentation; this docstring is only for
initialization.
"""
def __init__(self, Cube=None, figure=None, doplot=False, **kwargs):
"""
Create a map figure for future plotting
"""
# figure out where to put the plot
if isinstance(figure,matplotlib.figure.Figure):
self.figure = figure
elif type(figure) is int:
self.figure = matplotlib.pyplot.figure(figure)
else:
self.figure = None
self.axis = None
self.FITSFigure = None
self._click_marks = []
self._circles = []
self._clickX = None
self._clickY = None
self.overplot_colorcycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y'])
self.overplot_linestyle = '-'
self.Cube = Cube
if self.Cube is not None:
self.header = cubes.flatten_header(self.Cube.header, delete=True)
if pywcsOK:
self.wcs = pywcs.WCS(self.header)
if doplot: self.mapplot(**kwargs)
def __call__(self, **kwargs):
""" see mapplot """
return self.mapplot(**kwargs)
def mapplot(self, convention='calabretta', colorbar=True, useaplpy=True,
vmin=None, vmax=None, cmap=None, plotkwargs={}, **kwargs):
"""
Plot up a map based on an input data cube.
The map to be plotted is selected using `makeplane`.
The `estimator` keyword argument is passed to that function.
The plotted map, once shown, is interactive. You can click on it with any
of the three mouse buttons.
Button 1 or keyboard '1':
Plot the selected pixel's spectrum in another window. Mark the
clicked pixel with an 'x'
Button 2 or keyboard 'o':
Overplot a second (or third, fourth, fifth...) spectrum in the
external plot window
Button 3:
Disconnect the interactive viewer
You can also click-and-drag with button 1 to average over a circular
region. This same effect can be achieved by using the 'c' key to
set the /c/enter of a circle and the 'r' key to set its /r/adius (i.e.,
hover over the center and press 'c', then hover some distance away and
press 'r').
Parameters
----------
convention : 'calabretta' or 'griesen'
The default projection to assume for Galactic data when plotting
with aplpy.
colorbar : bool
Whether to show a colorbar
plotkwargs : dict, optional
A dictionary of keyword arguments to pass to aplpy.show_colorscale
or matplotlib.pyplot.imshow
useaplpy : bool
Use aplpy if a FITS header is available
vmin, vmax: float or None
Override values for the vmin/vmax values. Will be automatically
determined if left as None
.. todo:
Allow mapplot in subfigure
"""
if (self.figure is None):
self.figure = matplotlib.pyplot.figure()
elif (not matplotlib.pyplot.fignum_exists(self.figure.number)):
self.figure = matplotlib.pyplot.figure()
else:
self._disconnect()
self.figure.clf()
# this is where the map is created; everything below this is just plotting
self.makeplane(**kwargs)
        # have to pop out estimator so that kwargs can be passed to imshow
if 'estimator' in kwargs:
kwargs.pop('estimator')
# Below here is all plotting stuff
if vmin is None: vmin = self.plane[self.plane==self.plane].min()
if vmax is None: vmax = self.plane[self.plane==self.plane].max()
if icanhasaplpy and useaplpy:
self.fitsfile = pyfits.PrimaryHDU(data=self.plane,header=self.header)
self.FITSFigure = aplpy.FITSFigure(self.fitsfile,figure=self.figure,convention=convention)
self.FITSFigure.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
if hasattr(self.FITSFigure, '_ax1'):
self.axis = self.FITSFigure._ax1
else:
self.axis = self.FITSFigure.ax
if colorbar:
try:
self.FITSFigure.add_colorbar()
except Exception as ex:
print("ERROR: Could not create colorbar! Error was %s" % str(ex))
self._origin = 0 # FITS convention
# TODO: set _origin to 1 if using PIXEL units, not real wcs
else:
self.axis = self.figure.add_subplot(111)
if hasattr(self,'colorbar') and self.colorbar is not None:
if self.colorbar.ax in self.axis.figure.axes:
self.axis.figure.delaxes(self.colorbar.ax)
self.axis.imshow(self.plane, vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
if colorbar:
try:
self.colorbar = matplotlib.pyplot.colorbar(self.axis.images[0])
except Exception as ex:
print("ERROR: Could not create colorbar! Error was %s" % str(ex))
self._origin = 0 # normal convention
self.canvas = self.axis.figure.canvas
self._connect()
def _connect(self):
""" Connect click, click up (release click), and key press to events """
self.clickid = self.canvas.callbacks.connect('button_press_event',self.click)
self.clickupid = self.canvas.callbacks.connect('button_release_event',self.plot_spectrum)
self.keyid = self.canvas.callbacks.connect('key_press_event',self.plot_spectrum)
def _disconnect(self):
""" Disconnect click, click up (release click), and key press from events """
if hasattr(self,'canvas'):
self.canvas.mpl_disconnect(self.clickid)
self.canvas.mpl_disconnect(self.clickupid)
self.canvas.mpl_disconnect(self.keyid)
def makeplane(self, estimator=np.nanmean):
"""
Create a "plane" view of the cube, either by slicing or projecting it
or by showing a slice from the best-fit model parameter cube.
Parameters
----------
estimator : [ function | 'max' | 'int' | FITS filename | integer | slice ]
A non-pythonic, non-duck-typed variable. If it's a function, apply that function
along the cube's spectral axis to obtain an estimate (e.g., mean, min, max, etc.).
'max' will do the same thing as passing np.max
'int' will attempt to integrate the image (which is why I didn't duck-type)
(integrate means sum and multiply by dx)
a .fits filename will be read using pyfits (so you can make your own cover figure)
an integer will get the n'th slice in the parcube if it exists
If it's a slice, slice the input data cube along the Z-axis with this slice
"""
# THIS IS A HACK!!! isinstance(a function, function) must be a thing...
FUNCTION = type(np.max)
# estimator is NOT duck-typed
if type(estimator) is FUNCTION:
self.plane = estimator(self.Cube.cube,axis=0)
elif isinstance(estimator, six.string_types):
if estimator == 'max':
self.plane = self.Cube.cube.max(axis=0)
elif estimator == 'int':
dx = np.abs(self.Cube.xarr[1:] - self.Cube.xarr[:-1])
dx = np.concatenate([dx,[dx[-1]]])
self.plane = (self.Cube.cube * dx[:,np.newaxis,np.newaxis]).sum(axis=0)
elif estimator[-5:] == ".fits":
self.plane = pyfits.getdata(estimator)
elif type(estimator) is slice:
self.plane = self.Cube.cube[estimator,:,:]
elif type(estimator) is int:
if hasattr(self.Cube,'parcube'):
self.plane = self.Cube.parcube[estimator,:,:]
if self.plane is None:
raise ValueError("Invalid estimator %s" % (str(estimator)))
if np.sum(np.isfinite(self.plane)) == 0:
raise ValueError("Map is all NaNs or infs. Check your estimator or your input cube.")
def click(self,event):
"""
Record location of downclick
"""
if event.inaxes:
self._clickX = np.round(event.xdata) - self._origin
self._clickY = np.round(event.ydata) - self._origin
def plot_spectrum(self, event, plot_fit=True):
"""
Connects map cube to Spectrum...
"""
self.event = event
if event.inaxes:
clickX = np.round(event.xdata) - self._origin
clickY = np.round(event.ydata) - self._origin
# grab toolbar info so that we don't do anything if a tool is selected
tb = self.canvas.toolbar
if tb.mode != '':
return
elif event.key is not None:
if event.key == 'c':
self._center = (clickX-1,clickY-1)
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
elif event.key == 'r':
x,y = self._center
self._add_circle(x,y,clickX,clickY)
self.circle(x,y,clickX-1,clickY-1)
elif event.key == 'o':
clickX,clickY = round(clickX),round(clickY)
print("OverPlotting spectrum from point %i,%i" % (clickX-1,clickY-1))
                    color = next(self.overplot_colorcycle)
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.key in ('1','2'):
event.button = int(event.key)
event.key = None
self.plot_spectrum(event)
elif (hasattr(event,'button') and event.button in (1,2)
and not (self._clickX == clickX and self._clickY == clickY)):
if event.button == 1:
self._remove_circle()
clear=True
color = 'k'
linestyle = 'steps-mid'
else:
                    color = next(self.overplot_colorcycle)
linestyle = self.overplot_linestyle
clear=False
rad = ( (self._clickX-clickX)**2 + (self._clickY-clickY)**2 )**0.5
print("Plotting circle from point %i,%i to %i,%i (r=%f)" % (self._clickX-1,self._clickY-1,clickX-1,clickY-1,rad))
self._add_circle(self._clickX,self._clickY,clickX,clickY)
self.circle(self._clickX-1,self._clickY-1,clickX-1,clickY-1,clear=clear,linestyle=linestyle,color=color)
elif hasattr(event,'button') and event.button is not None:
if event.button==1:
clickX,clickY = round(clickX),round(clickY)
print("Plotting spectrum from point %i,%i" % (clickX-1,clickY-1))
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=True)
if plot_fit: self.Cube.plot_fit(clickX-1, clickY-1, silent=True)
elif event.button==2:
clickX,clickY = round(clickX),round(clickY)
print("OverPlotting spectrum from point %i,%i" % (clickX-1,clickY-1))
                    color = next(self.overplot_colorcycle)
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.button==3:
print("Disconnecting GAIA-like tool")
self._disconnect()
else:
print("Call failed for some reason: ")
print("event: ",event)
else:
pass
# never really needed... warn("Click outside of axes")
def _add_click_mark(self,x,y,clear=False,color='k'):
"""
Add an X at some position
"""
if clear:
self._clear_click_marks()
if self.FITSFigure is not None:
label = 'xmark%i' % (len(self._click_marks)+1)
x,y = self.FITSFigure.pixel2world(x,y)
self.FITSFigure.show_markers(x,y,marker='x',c=color,layer=label)
self._click_marks.append( label )
else:
self._click_marks.append( self.axis.plot(x,y,'kx') )
self.refresh()
def _clear_click_marks(self):
"""
Remove all marks added by previous clicks
"""
if self.FITSFigure is not None:
for mark in self._click_marks:
if mark in self.FITSFigure._layers:
self.FITSFigure.remove_layer(mark)
else:
for mark in self._click_marks:
self._click_marks.remove(mark)
if mark in self.axis.lines:
self.axis.lines.remove(mark)
self.refresh()
def _add_circle(self,x,y,x2,y2,**kwargs):
"""
"""
if self.FITSFigure is not None:
x,y = self.FITSFigure.pixel2world(x,y)
x2,y2 = self.FITSFigure.pixel2world(x2,y2)
r = (np.linalg.norm(np.array([x,y])-np.array([x2,y2])))
#self.FITSFigure.show_markers(x,y,s=r,marker='o',facecolor='none',edgecolor='black',layer='circle')
layername = "circle%02i" % len(self._circles)
self.FITSFigure.show_circles(x,y,r,edgecolor='black',facecolor='none',layer=layername,**kwargs)
self._circles.append(layername)
else:
r = np.linalg.norm(np.array([x,y])-np.array([x2,y2]))
circle = matplotlib.patches.Circle([x,y],radius=r,**kwargs)
self._circles.append( circle )
self.axis.patches.append(circle)
self.refresh()
def _remove_circle(self):
"""
"""
if self.FITSFigure is not None:
for layername in self._circles:
if layername in self.FITSFigure._layers:
self.FITSFigure.remove_layer(layername)
else:
for circle in self._circles:
if circle in self.axis.patches:
self.axis.patches.remove(circle)
self._circles.remove(circle)
self.refresh()
def refresh(self):
if self.axis is not None:
self.axis.figure.canvas.draw()
def circle(self,x1,y1,x2,y2,**kwargs):
"""
Plot the spectrum of a circular aperture
"""
r = (np.linalg.norm(np.array([x1,y1])-np.array([x2,y2])))
self.Cube.plot_apspec([x1,y1,r],**kwargs)
#self.Cube.data = cubes.extract_aperture( self.Cube.cube, [x1,y1,r] , coordsys=None )
#self.Cube.plotter()
def copy(self, parent=None):
"""
Create a copy of the map plotter with blank (uninitialized) axis & figure
[ parent ]
A spectroscopic axis instance that is the parent of the specfit
instance. This needs to be specified at some point, but defaults
to None to prevent overwriting a previous plot.
"""
newmapplot = copy.copy(self)
newmapplot.Cube = parent
newmapplot.axis = None
newmapplot.figure = None
return newmapplot
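# Illustrative sketch (not part of the original module): a typical interactive
# session, assuming a hypothetical FITS cube file named "cube.fits". The
# estimator keyword is forwarded to makeplane(), so any of the forms
# documented there can be used.
def _demo_mapplot(cube_filename="cube.fits"):
    """Demonstrate MapPlotter via a pyspeckit Cube (filename is hypothetical)."""
    from pyspeckit import Cube
    cube = Cube(cube_filename)
    cube.mapplot(estimator=np.nanmean)   # mean map; clicks plot spectra
    cube.mapplot(estimator='int')        # re-plot as an integrated map
    return cube.mapplot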
|
mit
|
anntzer/scikit-learn
|
sklearn/externals/_pilutil.py
|
15
|
17784
|
"""
A collection of image utilities using the Python Imaging Library (PIL).
This is a local version of utility functions from scipy that are wrapping PIL
functionality. These functions are deprecated in scipy 1.0.0 and will be
removed in scipy 1.2.0. Therefore, the functionality used in sklearn is copied
here. This file is taken from scipy/misc/pilutil.py in scipy
1.0.0. Modifications include: making this module importable if pillow is not
installed, removal of DeprecationWarning, removal of functions scikit-learn
does not need.
Copyright (c) 2001, 2002 Enthought, Inc.
All rights reserved.
Copyright (c) 2003-2017 SciPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of Enthought nor the names of the SciPy Developers
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import division, print_function, absolute_import
import numpy
from numpy import (amin, amax, ravel, asarray, arange, ones, newaxis,
transpose, iscomplexobj, uint8, issubdtype, array)
# Modification of original scipy pilutil.py to make this module importable if
# pillow is not installed. If pillow is not installed, functions will raise
# ImportError when called.
try:
try:
from PIL import Image
except ImportError:
import Image
pillow_installed = True
if not hasattr(Image, 'frombytes'):
Image.frombytes = Image.fromstring
except ImportError:
pillow_installed = False
__all__ = ['bytescale', 'imread', 'imsave', 'fromimage', 'toimage', 'imresize']
PILLOW_ERROR_MESSAGE = (
"The Python Imaging Library (PIL) is required to load data "
"from jpeg files. Please refer to "
"https://pillow.readthedocs.io/en/stable/installation.html "
"for installing PIL."
)
def bytescale(data, cmin=None, cmax=None, high=255, low=0):
"""
Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255).
If the input image already has dtype uint8, no scaling is done.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, default=None
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, default=None
Bias scaling of large values. Default is ``data.max()``.
    high : scalar, default=255
        Scale max value to `high`.
    low : scalar, default=0
        Scale min value to `low`.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> import numpy as np
>>> from scipy.misc import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8)
"""
if data.dtype == uint8:
return data
if high > 255:
raise ValueError("`high` should be less than or equal to 255.")
if low < 0:
raise ValueError("`low` should be greater than or equal to 0.")
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale < 0:
raise ValueError("`cmax` should be larger than `cmin`.")
elif cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(uint8)
def imread(name, flatten=False, mode=None):
"""
Read an image from a file as an array.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
name : str or file object
The file name or file object to be read.
flatten : bool, default=False
If True, flattens the color layers into a single gray-scale layer.
mode : str, default=None
Mode to convert image to, e.g. ``'RGB'``. See the Notes for more
details.
Returns
-------
imread : ndarray
The array obtained by reading the image.
Notes
-----
`imread` uses the Python Imaging Library (PIL) to read an image.
The following notes are from the PIL documentation.
`mode` can be one of the following strings:
* 'L' (8-bit pixels, black and white)
* 'P' (8-bit pixels, mapped to any other mode using a color palette)
* 'RGB' (3x8-bit pixels, true color)
* 'RGBA' (4x8-bit pixels, true color with transparency mask)
* 'CMYK' (4x8-bit pixels, color separation)
* 'YCbCr' (3x8-bit pixels, color video format)
* 'I' (32-bit signed integer pixels)
* 'F' (32-bit floating point pixels)
PIL also provides limited support for a few special modes, including
'LA' ('L' with alpha), 'RGBX' (true color with padding) and 'RGBa'
(true color with premultiplied alpha).
When translating a color image to black and white (mode 'L', 'I' or
'F'), the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
When `flatten` is True, the image is converted using mode 'F'.
When `mode` is not None and `flatten` is True, the image is first
converted according to `mode`, and the result is then flattened using
mode 'F'.
"""
if not pillow_installed:
raise ImportError(PILLOW_ERROR_MESSAGE)
im = Image.open(name)
return fromimage(im, flatten=flatten, mode=mode)
def imsave(name, arr, format=None):
"""
Save an array as an image.
This function is only available if Python Imaging Library (PIL) is installed.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Parameters
----------
name : str or file object
Output file name or file object.
arr : ndarray, MxN or MxNx3 or MxNx4
Array containing image values. If the shape is ``MxN``, the array
represents a grey-level image. Shape ``MxNx3`` stores the red, green
and blue bands along the last dimension. An alpha layer may be
included, specified as the last colour band of an ``MxNx4`` array.
format : str, default=None
Image format. If omitted, the format to use is determined from the
file name extension. If a file object was used instead of a file name,
this parameter should always be used.
Examples
--------
Construct an array of gradient intensity values and save to file:
>>> import numpy as np
>>> from scipy.misc import imsave
    >>> x = np.zeros((255, 255), dtype=np.uint8)
>>> x[:] = np.arange(255)
>>> imsave('gradient.png', x)
Construct an array with three colour bands (R, G, B) and store to file:
>>> rgb = np.zeros((255, 255, 3), dtype=np.uint8)
>>> rgb[..., 0] = np.arange(255)
>>> rgb[..., 1] = 55
>>> rgb[..., 2] = 1 - np.arange(255)
>>> imsave('rgb_gradient.png', rgb)
"""
im = toimage(arr, channel_axis=2)
if format is None:
im.save(name)
else:
im.save(name, format)
return
def fromimage(im, flatten=False, mode=None):
"""
Return a copy of a PIL image as a numpy array.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
im : PIL image
Input image.
flatten : bool, default=False
If true, convert the output to grey-scale.
mode : str, default=None
Mode to convert image to, e.g. ``'RGB'``. See the Notes of the
`imread` docstring for more details.
Returns
-------
fromimage : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
"""
if not pillow_installed:
raise ImportError(PILLOW_ERROR_MESSAGE)
if not Image.isImageType(im):
raise TypeError("Input is not a PIL image.")
if mode is not None:
if mode != im.mode:
im = im.convert(mode)
elif im.mode == 'P':
# Mode 'P' means there is an indexed "palette". If we leave the mode
# as 'P', then when we do `a = array(im)` below, `a` will be a 2-D
# containing the indices into the palette, and not a 3-D array
# containing the RGB or RGBA values.
if 'transparency' in im.info:
im = im.convert('RGBA')
else:
im = im.convert('RGB')
if flatten:
im = im.convert('F')
elif im.mode == '1':
# Workaround for crash in PIL. When im is 1-bit, the call array(im)
# can cause a seg. fault, or generate garbage. See
# https://github.com/scipy/scipy/issues/2138 and
# https://github.com/python-pillow/Pillow/issues/350.
#
# This converts im from a 1-bit image to an 8-bit image.
im = im.convert('L')
a = array(im)
return a
_errstr = "Mode is unknown or incompatible with input array shape."
def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None,
mode=None, channel_axis=None):
"""Takes a numpy array and returns a PIL image.
This function is only available if Python Imaging Library (PIL) is installed.
The mode of the PIL image depends on the array shape and the `pal` and
`mode` keywords.
For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
(from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
is given as 'F' or 'I' in which case a float and/or integer array is made.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Notes
-----
For 3-D arrays, the `channel_axis` argument tells which dimension of the
array holds the channel data.
For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
by default or 'YCbCr' if selected.
The numpy array must be either 2 dimensional or 3 dimensional.
"""
if not pillow_installed:
raise ImportError(PILLOW_ERROR_MESSAGE)
data = asarray(arr)
if iscomplexobj(data):
raise ValueError("Cannot convert a complex-valued array.")
shape = list(data.shape)
valid = len(shape) == 2 or ((len(shape) == 3) and
((3 in shape) or (4 in shape)))
if not valid:
raise ValueError("'arr' does not have a suitable array shape for "
"any mode.")
if len(shape) == 2:
shape = (shape[1], shape[0]) # columns show up first
if mode == 'F':
data32 = data.astype(numpy.float32)
image = Image.frombytes(mode, shape, data32.tobytes())
return image
if mode in [None, 'L', 'P']:
bytedata = bytescale(data, high=high, low=low,
cmin=cmin, cmax=cmax)
image = Image.frombytes('L', shape, bytedata.tobytes())
if pal is not None:
image.putpalette(asarray(pal, dtype=uint8).tobytes())
# Becomes a mode='P' automagically.
elif mode == 'P': # default gray-scale
pal = (arange(0, 256, 1, dtype=uint8)[:, newaxis] *
ones((3,), dtype=uint8)[newaxis, :])
image.putpalette(asarray(pal, dtype=uint8).tobytes())
return image
if mode == '1': # high input gives threshold for 1
bytedata = (data > high)
image = Image.frombytes('1', shape, bytedata.tobytes())
return image
if cmin is None:
cmin = amin(ravel(data))
if cmax is None:
cmax = amax(ravel(data))
data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low
if mode == 'I':
data32 = data.astype(numpy.uint32)
image = Image.frombytes(mode, shape, data32.tobytes())
else:
raise ValueError(_errstr)
return image
# if here then 3-d array with a 3 or a 4 in the shape length.
# Check for 3 in datacube shape --- 'RGB' or 'YCbCr'
if channel_axis is None:
if (3 in shape):
ca = numpy.flatnonzero(asarray(shape) == 3)[0]
else:
ca = numpy.flatnonzero(asarray(shape) == 4)
if len(ca):
ca = ca[0]
else:
raise ValueError("Could not find channel dimension.")
else:
ca = channel_axis
numch = shape[ca]
if numch not in [3, 4]:
raise ValueError("Channel axis dimension is not valid.")
bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax)
if ca == 2:
strdata = bytedata.tobytes()
shape = (shape[1], shape[0])
elif ca == 1:
strdata = transpose(bytedata, (0, 2, 1)).tobytes()
shape = (shape[2], shape[0])
elif ca == 0:
strdata = transpose(bytedata, (1, 2, 0)).tobytes()
shape = (shape[2], shape[1])
if mode is None:
if numch == 3:
mode = 'RGB'
else:
mode = 'RGBA'
if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']:
raise ValueError(_errstr)
if mode in ['RGB', 'YCbCr']:
if numch != 3:
raise ValueError("Invalid array shape for mode.")
if mode in ['RGBA', 'CMYK']:
if numch != 4:
raise ValueError("Invalid array shape for mode.")
# Here we know data and mode is correct
image = Image.frombytes(mode, shape, strdata)
return image
def imresize(arr, size, interp='bilinear', mode=None):
"""
Resize an image.
This function is only available if Python Imaging Library (PIL) is installed.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Parameters
----------
arr : ndarray
        The image array to be resized.
size : int, float or tuple
* int - Percentage of current size.
* float - Fraction of current size.
* tuple - Size of the output image (height, width).
interp : str, default='bilinear'
Interpolation to use for re-sizing ('nearest', 'lanczos', 'bilinear',
'bicubic' or 'cubic').
mode : str, default=None
The PIL image mode ('P', 'L', etc.) to convert `arr` before resizing.
If ``mode=None`` (the default), 2-D images will be treated like
``mode='L'``, i.e. casting to long integer. For 3-D and 4-D arrays,
`mode` will be set to ``'RGB'`` and ``'RGBA'`` respectively.
Returns
-------
imresize : ndarray
        The resized image array.
See Also
--------
toimage : Implicitly used to convert `arr` according to `mode`.
scipy.ndimage.zoom : More generic implementation that does not use PIL.
"""
im = toimage(arr, mode=mode)
ts = type(size)
if issubdtype(ts, numpy.signedinteger):
percent = size / 100.0
size = tuple((array(im.size)*percent).astype(int))
elif issubdtype(type(size), numpy.floating):
size = tuple((array(im.size)*size).astype(int))
else:
size = (size[1], size[0])
func = {'nearest': 0, 'lanczos': 1, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
imnew = im.resize(size, resample=func[interp])
return fromimage(imnew)
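# --- Hedged usage sketch (illustration only, not part of the original module) ---
# A minimal end-to-end use of the helpers above: build a synthetic gradient,
# shrink it to half size with bilinear interpolation, and write it to disk.
# It assumes numpy and Pillow are installed (the functions above require
# Pillow); the output filename 'gradient_small.png' is a placeholder chosen
# for this sketch.
if __name__ == "__main__":
    import numpy as np

    img = np.tile(np.arange(256, dtype=np.uint8), (256, 1))  # 256x256 horizontal gradient
    small = imresize(img, 0.5, interp='bilinear')             # float size -> fraction of original
    print(small.shape)                                        # expected: (128, 128)
    imsave('gradient_small.png', small)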
|
bsd-3-clause
|
LEX2016WoKaGru/pyClamster
|
pyclamster/clustering/kmeans.py
|
1
|
4221
|
# -*- coding: utf-8 -*-
"""
Created on 10.06.16
Created for pyclamster
Copyright (C) {2016}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# System modules
# External modules
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.base import BaseEstimator, TransformerMixin, ClusterMixin
from sklearn.metrics import silhouette_score
# Internal modules
from ..maskstore import Labels
__version__ = "0.1"
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""
Class for the KMeans algorithm based on sklearn MiniBatchKMeans algorithm.
    In addition to sklearn's KMeans, this wrapper returns a Labels instance
    from the predict method and can also determine the best number of
    clusters with the bestK method.
"""
def __init__(self, n_cluster=None):
"""
Args:
n_cluster (optional[int]): Number of clusters. If it is none then
the number of clusters will be determined with an automatic
method within the fit method.
"""
self.base_algorithm = MiniBatchKMeans
self.algorithm = None
self.n_cluster = n_cluster
@property
def labels(self):
assert self.algorithm is not None, "The algorithm isn't trained yet"
return Labels(self.algorithm.labels_)
def fit(self, X):
if self.n_cluster is None:
            self.n_cluster, _, _ = self.bestK(X)
if self.algorithm is None:
self.algorithm = self.base_algorithm(self.n_cluster)
self.algorithm.fit(X)
return self
def partial_fit(self, X):
if self.algorithm is None:
self.algorithm = self.base_algorithm(self.n_cluster)
self.algorithm.partial_fit(X)
return self
def predict(self, X):
return Labels(self.algorithm.predict(X))
def transform(self, X):
return self.algorithm.transform(X)
def bestK(self, X, range_k=(2, 20)):
"""
Based on the silhouette score at the moment.
Args:
X (numpy array):
Returns:
            n_cluster, scores, inertia
"""
silhouette = {1: -1}
inertia = {}
for k in range(range_k[0], range_k[1]+1):
clusterer = self.base_algorithm(k).fit(X)
labels = clusterer.labels_
silhouette[k] = silhouette_score(X, labels, sample_size=1000,
random_state=42)
inertia[k] = clusterer.inertia_
if silhouette[k-1]>silhouette[k]:
n_cluster = k-1
break
else:
n_cluster = max(silhouette, key=silhouette.get)
return n_cluster, silhouette, inertia
def gap(self, X, range_k=(2, 20)):
"""
Based on the silhouette score at the moment.
Args:
X (numpy array):
Returns:
            n_cluster, scores, inertia
"""
        # NOTE: mins/maxes are computed but not used below; the body of this
        # method currently mirrors bestK() (silhouette-based selection).
        mins, maxes = np.min(X, axis=0), np.max(X, axis=0)
silhouette = {1: -1}
inertia = {}
for k in range(range_k[0], range_k[1]+1):
clusterer = self.base_algorithm(k).fit(X)
labels = clusterer.labels_
silhouette[k] = silhouette_score(X, labels, sample_size=1000,
random_state=42)
inertia[k] = clusterer.inertia_
if silhouette[k-1]>silhouette[k]:
n_cluster = k-1
break
else:
n_cluster = max(silhouette, key=silhouette.get)
return n_cluster, silhouette, inertia
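# --- Hedged usage sketch (illustration only, not part of the original module) ---
# The intended workflow, assuming scikit-learn and numpy are available: fit the
# wrapper without specifying n_cluster so that bestK() picks the number of
# clusters from the silhouette score. The synthetic blobs below are purely
# illustrative; run via `python -m pyclamster.clustering.kmeans` so that the
# relative Labels import resolves.
if __name__ == "__main__":
    rnd = np.random.RandomState(0)
    # three well-separated 2-D blobs around (0, 0), (1, 1) and (2, 2)
    X = np.concatenate([rnd.normal(loc=c, scale=0.1, size=(200, 2))
                        for c in (0.0, 1.0, 2.0)])
    km = KMeans()              # n_cluster=None -> determined inside fit()
    km.fit(X)
    print("number of clusters chosen by bestK():", km.n_cluster)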
|
gpl-3.0
|
ToAruShiroiNeko/revscoring
|
revscoring/scorer_models/svc.py
|
1
|
6137
|
"""
A collection of Support Vector Machine type classifier models.
.. autoclass:: revscoring.scorer_models.LinearSVC
:members:
:member-order:
.. autoclass:: revscoring.scorer_models.RBFSVC
:members:
:member-order:
.. autoclass:: revscoring.scorer_models.SVC
:members:
:member-order:
"""
import random
import time
from collections import defaultdict
from sklearn import svm
from .scorer_model import ScikitLearnClassifier
class SVC(ScikitLearnClassifier):
"""
Implements a Support Vector Classifier model.
:Params:
features : `list` ( :class:`revscoring.Feature` )
The features that the model will be trained on
version : str
A version string representing the version of the model
`**kwargs`
Passed to :class:`sklearn.svm.SVC`
"""
def __init__(self, features, version=None, svc=None,
balance_labels=True, **kwargs):
if svc is None:
classifier_model = svm.SVC(probability=True, **kwargs)
else:
classifier_model = svc
super().__init__(features, classifier_model, version=version)
self.balance_labels = balance_labels
self.feature_stats = None
self.weights = None
def train(self, values_labels):
"""
:Returns:
A dictionary with the fields:
* seconds_elapsed -- Time in seconds spent fitting the model
"""
start = time.time()
# Balance labels
if self.balance_labels:
values_labels = self._balance_labels(values_labels)
# Split out feature_values
feature_values, labels = zip(*values_labels)
# Scale and center feature_values
self.feature_stats = self._generate_stats(feature_values)
scaled_values = self._scale_and_center(feature_values,
self.feature_stats)
# Train the classifier
stats = super().train(zip(scaled_values, labels))
# Overwrite seconds elapsed to account for time spent
# balancing and scaling
stats['seconds_elapsed'] = time.time() - start
return stats
def score(self, feature_values):
scaled_values = next(self._scale_and_center([feature_values],
self.feature_stats))
return super().score(scaled_values)
def _balance_labels(self, values_labels):
"""
Rebalances a set of a labels based on the label with the most
observations by sampling (with replacement[1]) from lesser labels.
For example, the following dataset has unbalanced observations:
(0.10 0.20 0.30), True
(0.20 0.10 0.30), False
(0.10 0.15 0.40), True
(0.09 0.40 0.30), False
(0.15 0.00 0.28), True
        `True` occurs three times while `False` only occurs twice. This
function would randomly choose one of the False observations to
duplicate in order to balance the labels. For example:
(0.10 0.20 0.30), True
(0.20 0.10 0.30), False
(0.20 0.10 0.30), False
(0.10 0.15 0.40), True
(0.09 0.40 0.30), False
(0.15 0.00 0.28), True
Why would anyone want to do this? If you don't, SVM's
        predict_proba() will return values that don't represent its
predictions. This is a hack. It seems to work in practice with large
numbers of observations[2].
1. See https://www.ma.utexas.edu/users/parker/sampling/repl.htm for a
discussion of "sampling with replacement".
2. http://nbviewer.ipython.org/github/halfak/
Objective-Revision-Evaluation-Service/blob/ipython/ipython/
Wat%20predict_proba.ipynb
"""
# Group observations by label
groups = defaultdict(list)
for feature_values, label in values_labels:
groups[label].append(feature_values)
# Find out which label occurs most often and how often
max_label_n = max(len(groups[label]) for label in groups)
# Resample the max observations from each group of observations.
new_values_labels = []
for label in groups:
new_values_labels.extend((random.choice(groups[label]), label)
for i in range(max_label_n))
# Shuffle the observations again before returning.
random.shuffle(new_values_labels)
return new_values_labels
SVCModel = SVC
"Alias for backwards compatibility"
class LinearSVC(SVC):
"""
Implements a Support Vector Classifier model with a Linear kernel.
:Params:
features : `list` ( :class:`revscoring.Feature` )
The features that the model will be trained on
version : str
A version string representing the version of the model
`**kwargs`
Passed to :class:`sklearn.svm.SVC`
"""
def __init__(self, *args, **kwargs):
if 'kernel' in kwargs:
raise TypeError("'kernel' is hard-coded to 'linear'. If you'd " +
"like to use a different kernel, use SVCModel.")
super().__init__(*args, kernel="linear", **kwargs)
LinearSVCModel = LinearSVC
"Alias for backwards compatibility"
class RBFSVC(SVC):
"""
Implements a Support Vector Classifier model with an RBF kernel.
:Params:
features : `list` ( :class:`revscoring.Feature` )
The features that the model will be trained on
version : str
A version string representing the version of the model
`**kwargs`
Passed to :class:`sklearn.svm.SVC`
"""
def __init__(self, *args, **kwargs):
if 'kernel' in kwargs:
raise TypeError("'kernel' is hard-coded to 'rbf'. If you'd " +
"like to use a different kernel, try SVCModel.")
super().__init__(*args, kernel="rbf", **kwargs)
RBFSVCModel = RBFSVC
"Alias for backwards compatibility"
|
mit
|
ElDeveloper/scikit-learn
|
examples/gaussian_process/plot_gpr_noisy_targets.py
|
45
|
3680
|
"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from matplotlib import pyplot as pl
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=r'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=r'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
|
bsd-3-clause
|
nicolasfauchereau/paleopy
|
paleopy/core/proxy.py
|
1
|
18233
|
# Python packages imports
import os
from collections import OrderedDict as od
import numpy as np
from numpy import ma
import pandas as pd
import matplotlib.pyplot as plt
import json
try:
import xarray as xray
except ImportError:
try:
import xray
except ImportError:
print('cannot import xarray or xray')
from scipy.stats import linregress
# relative imports
from ..utils import do_kdtree
from ..utils import haversine
from ..utils import pprint_od
from ..utils import seasons_params
from ..plotting import plot_season_ts
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
class proxy:
"""
base class for a single proxy
Parameters
----------
sitename : string
The name of the proxy site
user-defined, no default
proxy_type : string
the type of proxy
can be e.g.:
"Tree-ring"
"Speleotheme"
"Coral core"
user-defined, no default
lon : float
The longitude (in decimal degrees) of the site
user-defined, no default
lat : float
The latitude (in decimal degrees) of the site
user-defined, no default
djsons : string
The path to the json files defining the paths
        and parameters attached to each dataset + variable
defined by the frontend in PICT, default is ./jsons
pjsons : string
The path where to save the individual proxy json files
defined by the frontend in PICT, default is ./jsons/proxies
pfname : string
the name of the JSON file containing the information
for a single proxy, no default
dataset : string
The dataset to load to look for analogs, see
the `datasets.json` file for a list of the available
datasets (e.g. `ersst`, 'ncep', `gpcp`)
user-defined, default is `ersst`
variable : string
The variable to extract from the dataset to look for analogs, see
the `datasets.json` file for a list of variables available for
        each dataset (e.g. for the `ncep` dataset: `hgt_1000`, `hgt_850`, etc.)
user-defined, default is `sst`
season : string
The season to which the proxy is sensitive, can be:
- any of `DJF`, `JFM`, `FMA`, ..
- `Warm Season (Dec. - May)`
- `Cold Season (Jun. - Nov.)`
- `Year (Jan. - Dec.)`
- `Hydro. year (Jul. - Jun.)`
user-defined, default is `DJF`
value : float or integer or string
The value attached to the proxy
if string, must be in ['WB', 'B', 'N', 'A', 'WA']
for Well-Below, Below, etc and will be taken
as the category of anomalies WRT to present conditions
user-defined, no default
period : string
The period from which the analogs can be taken in a
given dataset. Default is "1979-2014"
user-defined, default is full period for the dataset
        interrogated
climatology : string
The climatological period with respect to which the
anomalies (if `calc_anoms == True`) are calculated
user-defined, default is "1981-2010"
calc_anoms : boolean
If `True`, the anomalies are calculated before the analog
years are determined: The `value` parameter needs to represent
an *anomaly* with respect to present condition
If `False`, the analog years are determined from the raw seasonal
time-series: The `value` parameter needs to represent a raw value
(i.e. a rainfall amount, a mean temperature)
user-defined, default is True
detrend : boolean
If `True` the linear trend is removed from the time-series
before the analog years are determined, if `False`, nothing is
done
user-defined, default is True
method : string
can be either 'closest 8' or 'quintiles'
        to specify the method employed to choose
the analog seasons
aspect : float
The aspect of the proxy site (in degrees from 0 to 360)
user-defined, no default
elevation : float
The elevation of the proxy site (in meters)
user-defined, no default
dating_convention : string
the dating convention
user-defined, no default
calendar : string
calendar year
user-defined, no default
chronology : string
the chronology control (i.e. 14C, Historic, Dendrochronology, etc)
user-defined, no default
measurement : string
the proxy measurement type (e.g. width for tree rings)
user-defined, no default
Attributes
----------
"""
def __init__(self, sitename=None, proxy_type=None, lon=None, lat=None, \
aspect=None, elevation=None, dating_convention=None, calendar=None, \
chronology=None, measurement=None, djsons='./jsons', pjsons='./jsons/proxies', \
pfname=None, dataset='ersst', variable='sst', season='DJF', value=None, \
qualitative=0, period="1979-2014", climatology="1981-2010", \
calc_anoms=1, detrend=1, method='quintiles'):
super(proxy, self).__init__()
if lon < 0:
lon += 360.
self.description = 'proxy'
self.sitename = sitename
self.proxy_type = proxy_type
self.measurement = measurement
self.coords = (lon, lat)
self.aspect = aspect
self.elevation = elevation
self.djsons = djsons
self.pjsons = pjsons
self.pfname = pfname
self.dataset = dataset
self.variable = variable
self.dating_convention = dating_convention
self.chronology = chronology
self.calendar = calendar
self.season = season
self.value = value
self.qualitative = bool(qualitative)
self.period = tuple(map(int,period.split("-"))) # to correct the type
self.climatology = tuple(map(int,climatology.split("-"))) # to correct the type
self.calc_anoms = bool(calc_anoms)
self.detrend = bool(detrend)
self.method = method
def read_dset_params(self):
"""
        reads the `datasets.json` file and loads the dictionary
containing all the parameters for this dataset
"""
with open(os.path.join(self.djsons, 'datasets.json'), 'r') as f:
dset_dict = json.loads(f.read())
self.dset_dict = dset_dict[self.dataset][self.variable]
def check_domain(self):
"""
checks if the domain that is passed
is compatible with the domain of the
dataset
"""
if not(hasattr(self, 'dset_dict')):
self.read_dset_params()
domain = self.dset_dict['domain']
lond = self.coords[0]
latd = self.coords[1]
# uncomment if one wants to print
# print("\nLON: {:4.2f}, LAT: {:4.2f}".format(lond, latd))
if ( (lond <= domain[0]) \
| (lond >= domain[1]) \
| (latd <= domain[2]) \
| (latd >= domain[3]) ):
print("""
ERROR! coordinates of the proxy fall outside
of the bounds of the domain for dataset {}
""".format(self.dataset))
raise Exception("DOMAIN ERROR")
def _calc_weights(self, df):
"""
calculate the weights for compositing
"""
tmp_df = df.copy(deep=True)
# print(self.value)
# print(type(self.value))
# print(type(tmp_df.iloc[:,0].values))
# print(np.abs(self.value - tmp_df.iloc[:,0].values.flatten()))
weights = abs(self.value - tmp_df.iloc[:,0]) / sum(abs(self.value - tmp_df.iloc[:,0]))
tmp_df.loc[:,'weights'] = (1 - weights) / (1 - weights).sum()
return tmp_df
def extract_ts(self):
"""
extract the time-series for the closest grid-point to
the passed proxy coordinates
"""
# checks the domain first
self.check_domain()
# if all good, we proceed
fname = self.dset_dict['path']
point = self.coords
start = str(self.period[0])
end = str(self.period[1])
dset = xray.open_dataset(fname)
dset = dset.sel(time=slice(start, end))
        # test if a mask is present, if so assume we have a grid
# then meshgrid, mask, flatten, etc
if 'mask' in dset.data_vars:
mask = dset['mask'].data
            mask = mask.astype(bool)  # plain bool: np.bool is deprecated
lon = dset['longitudes'].data
lat = dset['latitudes'].data
lons, lats = np.meshgrid(lon, lat)
lons = ma.masked_array(lons, mask)
lats = ma.masked_array(lats, mask)
lonf = lons.flatten('F').compressed()
latf = lats.flatten('F').compressed()
self.extracted_coords = do_kdtree(lonf, latf, point)
self.distance_point = haversine(self.extracted_coords, point)
ts = dset[self.variable].sel(longitudes=self.extracted_coords[0], latitudes=self.extracted_coords[1])
ts = ts.to_dataframe()
self.ts = pd.DataFrame(ts.loc[:,self.variable])
else:
lon = dset['longitudes'].data
lat = dset['latitudes'].data
lons, lats = np.meshgrid(lon, lat)
lonf = lons.flatten('F')
latf = lats.flatten('F')
### TODO:
### test, then replace do_kdtree with the call to the sel
### method of a xray.Dataset with method = 'nearest'
self.extracted_coords = do_kdtree(lonf, latf, point)
self.distance_point = haversine(self.extracted_coords, point)
ts = dset[self.variable].sel(longitudes=self.extracted_coords[0], latitudes=self.extracted_coords[1])
ts = ts.to_dataframe()
self.ts = pd.DataFrame(ts.loc[:,self.variable])
dset.close()
return self
def calculate_season(self):
"""
calculates the seasonal values, can be either raw
or anomalies depending on the parameter (boolean) `calc_anoms`
passed when instantiating the `proxy` class
"""
season = self.season
start_clim = str(self.climatology[0])
end_clim = str(self.climatology[1])
        # seasons parameters is a dictionary with:
# key = the season string ('DJF', 'JJA')
# value = a tuple (length of the season, month of the end of the season)
self.seasons_params = seasons_params()
if not(hasattr(self, 'ts')):
self.extract_ts()
# if the variable is rainfall, we calculate rolling sum
if self.dset_dict['units'] in ['mm']:
# test which version of pandas we are using
if pd.__version__ >= '0.18':
ts_seas = self.ts.rolling(window=self.seasons_params[season][0]).sum()
else:
ts_seas = pd.rolling_sum(self.ts, self.seasons_params[season][0])
# else we calculate the rolling mean (average)
else:
if pd.__version__ >= '0.18':
ts_seas = self.ts.rolling(window=self.seasons_params[season][0]).mean()
else:
ts_seas = pd.rolling_mean(self.ts, self.seasons_params[season][0])
ts_seas = ts_seas[ts_seas.index.month == self.seasons_params[season][1]]
# drop the missing values coming from the rolling average
ts_seas.dropna(inplace=True)
        # calculates the anomalies with respect to the climatological period
        ts_seas.loc[:,'anomalies'] = ts_seas - ts_seas.loc[start_clim:end_clim].mean(0)
        # calculates the trend for the raw values
x = ts_seas.index.year
y = ts_seas.loc[:,self.variable]
slope, intercept, pval, rval, stderr = linregress(x, y)
if not self.calc_anoms:
self.trend_params = {'slope':slope, 'intercept':intercept}
yhat = slope * x + intercept
ydetrend = y - yhat
ts_seas.loc[:,'d_' + self.variable] = (ydetrend + y.mean())
# calculates the trend for the anomalies
x = ts_seas.index.year
y = ts_seas.loc[:,'anomalies']
slope, intercept, pval, rval, stderr = linregress(x, y)
if self.calc_anoms:
self.trend_params = {'slope':slope, 'intercept':intercept}
yhat = slope * x + intercept
ydetrend = y - yhat
ts_seas.loc[:,'d_anomalies'] = ydetrend
        # TODO: extract the period of interest only, which should
        # be held in self.period
self.ts_seas = ts_seas.loc[str(self.period[0]):str(self.period[1]),:]
return self
def find_analogs(self):
"""
find the analog seasons
return:
self.analogs : a pandas DataFrame
self.analog_years : a list with the analog years
self.quintiles : the bins for the quintiles used
"""
if not(hasattr(self, 'ts_seas')):
self.calculate_season()
if self.calc_anoms and not self.detrend:
ts = self.ts_seas.loc[:,['anomalies']].copy(deep=True)
if self.calc_anoms and self.detrend:
ts = self.ts_seas.loc[:,['d_anomalies']].copy(deep=True)
if not self.calc_anoms and self.detrend:
ts = self.ts_seas.loc[:,['d_' + self.variable]].copy(deep=True)
if not self.calc_anoms and not self.detrend:
ts = self.ts_seas.loc[:,[self.variable]].copy(deep=True)
labels=['WB','B','N','A','WA']
sub, bins = pd.qcut(ts.iloc[:,0], 5, labels=labels, retbins=True)
ts.loc[:,'cat'] = sub
# if the flag qualitative is set to True (default is false)
# then we search the years corresponding to the category
if self.qualitative:
self.value = str(self.value)
if self.value not in labels:
raise ValueError("category not in ['WB','B','N','A','WA']")
else:
tmp_df = ts[ts['cat'] == self.value].copy(deep=True)
tmp_df.loc[:,'weights'] = 1. / len(tmp_df)
self.analogs = tmp_df
self.weights = self.analogs.loc[:,'weights'].values
self.category = self.value
        # if value is quantitative we use the chosen method ("quintiles" or "closest 8")
else:
self.value = float(self.value)
if self.method == 'quintiles':
bins[0] = -np.inf
bins[-1] = np.inf
                category = labels[np.searchsorted(bins, float(self.value)) - 1]
subset = ts[ts['cat'] == category]
self.category = category
tmp_df = subset.copy(deep=True)
# calculates the weights (add to 1)
self.analogs = self._calc_weights(tmp_df)
self.quintiles = bins
elif self.method == "closest 8":
sub = (abs(self.value - ts.iloc[:,0])).sort_values()[:8].index
tmp_df = ts.loc[sub,:].copy(deep=True)
# calculates the weights (add to 1)
self.analogs = self._calc_weights(tmp_df)
self.category = self.analogs.loc[:,'cat'].values
self.weights = self.analogs.loc[:,'weights'].values
self.analog_years = self.analogs.index.year
def proxy_repr(self, pprint=False, outfile=True):
"""
proxy_dict is an OrderedDict
"""
proxy_dict = od()
proxy_dict['sitename'] = self.sitename
        proxy_dict['proxy_type'] = self.proxy_type
        proxy_dict['measurement'] = self.measurement
proxy_dict['dating_convention'] = self.dating_convention
proxy_dict['calendar'] = self.calendar
proxy_dict['chronology'] = self.chronology
proxy_dict['coords'] = self.coords
proxy_dict['aspect'] = self.aspect
proxy_dict['elevation'] = self.elevation
proxy_dict['season'] = self.season
proxy_dict['dataset'] = self.dataset
proxy_dict['variable'] = self.variable
proxy_dict['calc_anoms'] = self.calc_anoms
proxy_dict['detrend'] = self.detrend
proxy_dict['value'] = self.value
proxy_dict['climatology'] = self.climatology
proxy_dict['period'] = self.period
proxy_dict['extracted_coords'] = self.extracted_coords.tolist()
proxy_dict['distance_point'] = self.distance_point
proxy_dict['trend_params'] = self.trend_params
if self.qualitative:
proxy_dict['category'] = self.category
else:
if self.method == 'quintiles':
proxy_dict['category'] = self.category
elif self.method == 'closest 8':
proxy_dict['category'] = ",".join(list(self.category))
proxy_dict['analog_years'] = self.analog_years.tolist()
proxy_dict['weights'] = list(self.weights)
if pprint:
pprint_od(proxy_dict)
if outfile:
# the name of the JSON file used to be created from the
# proxy name
# --------------------------------------------------------
# proxy_name = self.sitename.replace(" ","_")
# proxy_name = proxy_name.replace(".","")
# fname = "{}.json".format(self.sitename.replace(" ","_"))
# now the name of the JSON file is a parameter that is
# passed to the script "proxy_oper" by the PHP layer
# --------------------------------------------------------
#with open(os.path.join(self.pjsons, fname),'w') as f:
with open(os.path.join(self.pjsons, self.pfname),'w') as f:
json.dump(proxy_dict, f)
self.proxy_dict = proxy_dict
    def to_html(self, filename):
if not(hasattr(self, 'analogs')):
self.find_analogs()
def plot(self):
f = plot_season_ts(self)
return f
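# --- Hedged usage sketch (illustration only; paths and values are hypothetical) ---
# The intended workflow: instantiate a proxy with its site metadata and the
# dataset / variable / season it is sensitive to, call find_analogs() (which
# chains extract_ts() and calculate_season()), then proxy_repr() to inspect or
# dump the result. The ./jsons layout and the ERSST file it points to are
# assumed to exist as described in the class docstring, so this block is a
# sketch rather than a self-contained test.
if __name__ == "__main__":
    p = proxy(sitename='Example site', proxy_type='Tree-ring',
              lon=174.0, lat=-41.3,
              djsons='./jsons', pjsons='./jsons/proxies',
              pfname='example_site.json',
              dataset='ersst', variable='sst', season='DJF',
              value=0.4, period='1979-2014', climatology='1981-2010',
              calc_anoms=1, detrend=1, method='quintiles')
    p.find_analogs()
    print('analog years:', list(p.analog_years))
    p.proxy_repr(pprint=True, outfile=False)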
|
mit
|
jakevdp/altair
|
altair/vegalite/v2/schema/channels.py
|
1
|
127029
|
# -*- coding: utf-8 -*-
#
# The contents of this file are automatically written by
# tools/generate_schema_wrapper.py. Do not modify directly.
import six
from . import core
import pandas as pd
from altair.utils.schemapi import Undefined
from altair.utils import parse_shorthand
class FieldChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
shorthand = self._get('shorthand')
field = self._get('field')
if shorthand is not Undefined and field is not Undefined:
raise ValueError("{} specifies both shorthand={} and field={}. "
"".format(self.__class__.__name__, shorthand, field))
if isinstance(shorthand, (tuple, list)):
# If given a list of shorthands, then transform it to a list of classes
kwds = self._kwds.copy()
kwds.pop('shorthand')
return [self.__class__(sh, **kwds).to_dict(validate=validate, ignore=ignore, context=context)
for sh in shorthand]
if shorthand is Undefined:
parsed = {}
elif isinstance(shorthand, six.string_types):
parsed = parse_shorthand(shorthand, data=context.get('data', None))
type_required = 'type' in self._kwds
type_in_shorthand = 'type' in parsed
type_defined_explicitly = self._get('type') is not Undefined
if not type_required:
# Secondary field names don't require a type argument in VegaLite 3+.
# We still parse it out of the shorthand, but drop it here.
parsed.pop('type', None)
elif not (type_in_shorthand or type_defined_explicitly):
if isinstance(context.get('data', None), pd.DataFrame):
raise ValueError("{} encoding field is specified without a type; "
"the type cannot be inferred because it does not "
"match any column in the data.".format(shorthand))
else:
raise ValueError("{} encoding field is specified without a type; "
"the type cannot be automatically inferred because "
"the data is not specified as a pandas.DataFrame."
"".format(shorthand))
else:
# Shorthand is not a string; we pass the definition to field,
# and do not do any parsing.
parsed = {'field': shorthand}
# Set shorthand to Undefined, because it's not part of the base schema.
self.shorthand = Undefined
self._kwds.update({k: v for k, v in parsed.items()
if self._get(k) is Undefined})
return super(FieldChannelMixin, self).to_dict(
validate=validate,
ignore=ignore,
context=context
)
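# --- Illustrative note (not part of the generated schema) ---
# The mixin above hands string shorthands to parse_shorthand(), so a channel
# built from "mean(yield):Q" ends up with field='yield', aggregate='mean' and
# type='quantitative' when to_dict() is called, while passing field= and type=
# explicitly bypasses the parsing. A rough sketch (the column name 'yield' is
# illustrative):
#
#     parse_shorthand('mean(yield):Q')
#     # -> {'aggregate': 'mean', 'field': 'yield', 'type': 'quantitative'}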
class ValueChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
condition = getattr(self, 'condition', Undefined)
copy = self # don't copy unless we need to
if condition is not Undefined:
if isinstance(condition, core.SchemaBase):
pass
elif 'field' in condition and 'type' not in condition:
kwds = parse_shorthand(condition['field'], context.get('data', None))
copy = self.copy(deep=['condition'])
copy.condition.update(kwds)
return super(ValueChannelMixin, copy).to_dict(validate=validate,
ignore=ignore,
context=context)
class Color(FieldChannelMixin, core.MarkPropFieldDefWithCondition):
"""Color schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with a selection predicate.
**Note:** A field definition's ``condition`` property can only contain `value
definitions <https://vega.github.io/vega-lite/docs/encoding.html#value-def>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
Javascript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field.
or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the [fieldTitle
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "color"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Color, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
class ColorValue(ValueChannelMixin, core.MarkPropValueDefWithCondition):
"""ColorValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef>
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(float, string, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "color"
def __init__(self, value, condition=Undefined, **kwds):
super(ColorValue, self).__init__(value=value, condition=condition, **kwds)
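# --- Hedged usage sketch (illustration only, not part of the generated schema) ---
# Color maps a data field onto the colour scale, while ColorValue pins the
# channel to a constant. A minimal check of the two wrappers (run the module
# via `python -m` so the relative `core` import resolves; the field name
# 'species' is illustrative):
if __name__ == "__main__":
    print(Color('species:N').to_dict())        # -> {'field': 'species', 'type': 'nominal'}
    print(ColorValue('steelblue').to_dict())   # -> {'value': 'steelblue'}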
class Column(FieldChannelMixin, core.FacetFieldDef):
"""Column schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
header : :class:`Header`
An object defining properties of a facet's header.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
Javascript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field.
or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the [fieldTitle
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "column"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
header=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined,
**kwds):
super(Column, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
header=header, sort=sort, timeUnit=timeUnit, title=title,
type=type, **kwds)
class Detail(FieldChannelMixin, core.FieldDef):
"""Detail schema wrapper
Mapping(required=[shorthand])
Definition object for a data field, its type and transformation of an encoding channel.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field.
or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the [fieldTitle
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "detail"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Detail, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
class Fill(FieldChannelMixin, core.MarkPropFieldDefWithCondition):
"""Fill schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with a selection predicate.
**Note:** A field definition's ``condition`` property can only contain `value
definitions <https://vega.github.io/vega-lite/docs/encoding.html#value-def>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
Javascript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field.
or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
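Examples
--------
A minimal usage sketch, added for illustration only. It assumes this channel class
is re-exported on Altair's top-level ``alt`` namespace (as in Altair 2.x) and uses a
small hypothetical DataFrame; it is not part of the generated schema.
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'category': list('aabb'), 'value': [1, 2, 3, 4]})
>>> # map the fill color of each bar to the nominal "category" field
>>> chart = alt.Chart(df).mark_bar().encode(
...     x=alt.X('category:N'),
...     y=alt.Y('value:Q'),
...     fill=alt.Fill('category:N'),
... )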
"""
_class_is_valid_at_instantiation = False
_encoding_name = "fill"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Fill, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
class FillValue(ValueChannelMixin, core.MarkPropValueDefWithCondition):
"""FillValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef>
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(float, string, boolean)
A constant value in visual domain.
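Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.FillValue`` is
available on Altair's top-level namespace, as in Altair 2.x):
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
>>> # use a constant fill color rather than mapping it from a data field
>>> chart = alt.Chart(df).mark_bar().encode(
...     x=alt.X('x:O'),
...     y=alt.Y('y:Q'),
...     fill=alt.FillValue('steelblue'),
... )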
"""
_class_is_valid_at_instantiation = False
_encoding_name = "fill"
def __init__(self, value, condition=Undefined, **kwds):
super(FillValue, self).__init__(value=value, condition=condition, **kwds)
class Href(FieldChannelMixin, core.FieldDefWithCondition):
"""Href schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with a selection predicate.
**Note:** A field definition's ``condition`` property can only contain `value
definitions <https://vega.github.io/vega-lite/docs/encoding.html#value-def>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
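Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.Href`` is
available on Altair's top-level namespace and that the hypothetical ``url`` column
holds the link targets):
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'x': [1, 2], 'y': [3, 4],
...                    'url': ['https://example.com/a', 'https://example.com/b']})
>>> # turn each point into a hyperlink to the URL stored in the "url" field
>>> chart = alt.Chart(df).mark_point().encode(
...     x=alt.X('x:Q'),
...     y=alt.Y('y:Q'),
...     href=alt.Href('url:N'),
... )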
"""
_class_is_valid_at_instantiation = False
_encoding_name = "href"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Href, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, timeUnit=timeUnit, title=title,
type=type, **kwds)
class HrefValue(ValueChannelMixin, core.ValueDefWithCondition):
"""HrefValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef>
Attributes
----------
condition : anyOf(:class:`ConditionalFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(float, string, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "href"
def __init__(self, value, condition=Undefined, **kwds):
super(HrefValue, self).__init__(value=value, condition=condition, **kwds)
class Key(FieldChannelMixin, core.FieldDef):
"""Key schema wrapper
Mapping(required=[shorthand])
Definition object for a data field, its type and transformation of an encoding channel.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
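Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.Key`` is
available on Altair's top-level namespace; the ``key`` channel mainly matters when
the underlying data are updated or re-streamed):
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'id': ['a', 'b', 'c'], 'value': [1, 2, 3]})
>>> # identify each datum by "id" so later data updates can be matched to marks
>>> chart = alt.Chart(df).mark_point().encode(
...     x=alt.X('value:Q'),
...     key=alt.Key('id:N'),
... )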
"""
_class_is_valid_at_instantiation = False
_encoding_name = "key"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Key, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
class Latitude(FieldChannelMixin, core.FieldDef):
"""Latitude schema wrapper
Mapping(required=[shorthand])
Definition object for a data field, its type and transformation of an encoding channel.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
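Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.Latitude`` and
``alt.Longitude`` are available on Altair's top-level namespace and that a
geographic projection is applied to the chart):
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'lat': [37.77, 40.71], 'lon': [-122.42, -74.01]})
>>> # place one circle per row at its geographic position
>>> chart = alt.Chart(df).mark_circle().encode(
...     latitude=alt.Latitude('lat:Q'),
...     longitude=alt.Longitude('lon:Q'),
... ).project('albersUsa')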
"""
_class_is_valid_at_instantiation = False
_encoding_name = "latitude"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Latitude, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
class Latitude2(FieldChannelMixin, core.FieldDef):
"""Latitude2 schema wrapper
Mapping(required=[shorthand])
Definition object for a data field, its type and transformation of an encoding channel.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "latitude2"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Latitude2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
class Longitude(FieldChannelMixin, core.FieldDef):
"""Longitude schema wrapper
Mapping(required=[shorthand])
Definition object for a data field, its type and transformation of an encoding channel.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
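Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.Longitude`` and
``alt.Latitude`` are available on Altair's top-level namespace):
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'city': ['Stockholm', 'Tokyo'],
...                    'lon': [18.07, 139.69], 'lat': [59.33, 35.69]})
>>> # map the "lon" field to the horizontal geographic position of each circle
>>> chart = alt.Chart(df).mark_circle().encode(
...     longitude=alt.Longitude('lon:Q'),
...     latitude=alt.Latitude('lat:Q'),
... ).project('equirectangular')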
"""
_class_is_valid_at_instantiation = False
_encoding_name = "longitude"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Longitude, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
class Longitude2(FieldChannelMixin, core.FieldDef):
"""Longitude2 schema wrapper
Mapping(required=[shorthand])
Definition object for a data field, its type and transformation of an encoding channel.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "longitude2"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Longitude2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
class Opacity(FieldChannelMixin, core.MarkPropFieldDefWithCondition):
"""Opacity schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with a selection predicate.
**Note:** A field definition's ``condition`` property can only contain `value
definitions <https://vega.github.io/vega-lite/docs/encoding.html#value-def>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
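Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.Opacity`` is
available on Altair's top-level namespace):
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6], 'weight': [0.2, 0.5, 0.9]})
>>> # fade each point according to the quantitative "weight" field
>>> chart = alt.Chart(df).mark_point().encode(
...     x=alt.X('x:Q'),
...     y=alt.Y('y:Q'),
...     opacity=alt.Opacity('weight:Q'),
... )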
"""
_class_is_valid_at_instantiation = False
_encoding_name = "opacity"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Opacity, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
class OpacityValue(ValueChannelMixin, core.MarkPropValueDefWithCondition):
"""OpacityValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef>
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(float, string, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "opacity"
def __init__(self, value, condition=Undefined, **kwds):
super(OpacityValue, self).__init__(value=value, condition=condition, **kwds)
class Order(FieldChannelMixin, core.OrderFieldDef):
"""Order schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
sort : :class:`SortOrder`
The sort order. One of ``"ascending"`` (default) or ``"descending"``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
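Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.Order`` is
available on Altair's top-level namespace); the ``order`` channel controls, for
example, the order in which the points of a line are connected:
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'x': [1, 3, 2], 'y': [4, 6, 5], 'step': [1, 3, 2]})
>>> # connect the line in the order given by the "step" field
>>> chart = alt.Chart(df).mark_line().encode(
...     x=alt.X('x:Q'),
...     y=alt.Y('y:Q'),
...     order=alt.Order('step:O'),
... )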
"""
_class_is_valid_at_instantiation = False
_encoding_name = "order"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
sort=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Order, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
class OrderValue(ValueChannelMixin, core.ValueDef):
"""OrderValue schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : anyOf(float, string, boolean)
A constant value in the visual domain (e.g., ``"red"`` / ``"#0099ff"`` for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "order"
def __init__(self, value, **kwds):
super(OrderValue, self).__init__(value=value, **kwds)
class Row(FieldChannelMixin, core.FacetFieldDef):
"""Row schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
header : :class:`Header`
An object defining properties of a facet's header.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
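Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.Row`` is
available on Altair's top-level namespace):
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'x': [1, 2, 1, 2], 'y': [3, 4, 5, 6],
...                    'group': ['a', 'a', 'b', 'b']})
>>> # facet the chart into one row of panels per value of "group"
>>> chart = alt.Chart(df).mark_point().encode(
...     x=alt.X('x:Q'),
...     y=alt.Y('y:Q'),
...     row=alt.Row('group:N'),
... )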
"""
_class_is_valid_at_instantiation = False
_encoding_name = "row"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
header=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined,
**kwds):
super(Row, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
header=header, sort=sort, timeUnit=timeUnit, title=title, type=type,
**kwds)
class Shape(FieldChannelMixin, core.MarkPropFieldDefWithCondition):
"""Shape schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with a selection predicate.
**Note:** A field definition's ``condition`` property can only contain `value
definitions <https://vega.github.io/vega-lite/docs/encoding.html#value-def>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
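Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.Shape`` is
available on Altair's top-level namespace):
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6],
...                    'species': ['setosa', 'setosa', 'virginica']})
>>> # draw a different point symbol for each value of the nominal "species" field
>>> chart = alt.Chart(df).mark_point().encode(
...     x=alt.X('x:Q'),
...     y=alt.Y('y:Q'),
...     shape=alt.Shape('species:N'),
... )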
"""
_class_is_valid_at_instantiation = False
_encoding_name = "shape"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Shape, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
class ShapeValue(ValueChannelMixin, core.MarkPropValueDefWithCondition):
"""ShapeValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef>
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(float, string, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "shape"
def __init__(self, value, condition=Undefined, **kwds):
super(ShapeValue, self).__init__(value=value, condition=condition, **kwds)
class Size(FieldChannelMixin, core.MarkPropFieldDefWithCondition):
"""Size schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with a selection predicate.
**Note:** A field definition's ``condition`` property can only contain `value
definitions <https://vega.github.io/vega-lite/docs/encoding.html#value-def>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
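Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.Size`` is
available on Altair's top-level namespace):
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6], 'population': [10, 40, 90]})
>>> # scale the area of each point by the quantitative "population" field
>>> chart = alt.Chart(df).mark_point().encode(
...     x=alt.X('x:Q'),
...     y=alt.Y('y:Q'),
...     size=alt.Size('population:Q'),
... )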
"""
_class_is_valid_at_instantiation = False
_encoding_name = "size"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Size, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
class SizeValue(ValueChannelMixin, core.MarkPropValueDefWithCondition):
"""SizeValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef>
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(float, string, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "size"
def __init__(self, value, condition=Undefined, **kwds):
super(SizeValue, self).__init__(value=value, condition=condition, **kwds)
class Stroke(FieldChannelMixin, core.MarkPropFieldDefWithCondition):
"""Stroke schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with a selection predicate.
**Note:** A field definition's ``condition`` property can only contain `value
definitions <https://vega.github.io/vega-lite/docs/encoding.html#value-def>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the ``fieldTitle``
property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
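Examples
--------
A minimal usage sketch, added for illustration only (assumes ``alt.Stroke`` is
available on Altair's top-level namespace):
>>> import pandas as pd
>>> import altair as alt
>>> df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 3, 2, 4],
...                    'series': ['a', 'a', 'b', 'b']})
>>> # color the stroke of each line by the nominal "series" field
>>> chart = alt.Chart(df).mark_line().encode(
...     x=alt.X('x:Q'),
...     y=alt.Y('y:Q'),
...     stroke=alt.Stroke('series:N'),
... )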
"""
_class_is_valid_at_instantiation = False
_encoding_name = "stroke"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Stroke, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
class StrokeValue(ValueChannelMixin, core.MarkPropValueDefWithCondition):
"""StrokeValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef>
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(float, string, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "stroke"
def __init__(self, value, condition=Undefined, **kwds):
super(StrokeValue, self).__init__(value=value, condition=condition, **kwds)
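# ---------------------------------------------------------------------------
# Editor's usage sketch (not generated from the Vega-Lite schema): a minimal,
# hypothetical example of a conditional stroke value built from the classes
# above via the public `altair` namespace. Data and field names are invented.
def _example_conditional_stroke():
    import pandas as pd
    import altair as alt
    data = pd.DataFrame({"x": [1, 2, 3, 4], "y": [3, 1, 4, 2]})
    brush = alt.selection_interval()
    # Points inside the interval selection keep a black stroke, the rest grey.
    return alt.Chart(data).mark_point(filled=True).encode(
        x="x:Q",
        y="y:Q",
        stroke=alt.condition(brush, alt.value("black"), alt.value("lightgray")),
    ).add_selection(brush)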
class Text(FieldChannelMixin, core.TextFieldDefWithCondition):
"""Text schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with a selection predicate.
**Note:** A field definition's ``condition`` property can only contain `value
definitions <https://vega.github.io/vega-lite/docs/encoding.html#value-def>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
format : string
The `formatting pattern <https://vega.github.io/vega-lite/docs/format.html>`__ for a
text field. If not defined, this will be determined automatically.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
property in the config <https://vega.github.io/vega-lite/docs/config.html>`__ or the
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "text"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, format=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined,
**kwds):
super(Text, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, format=format, timeUnit=timeUnit,
title=title, type=type, **kwds)
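# ---------------------------------------------------------------------------
# Editor's usage sketch (not generated from the Vega-Lite schema): a minimal,
# hypothetical example of the Text channel with the ``format`` property
# documented above. Data and field names are invented.
def _example_text_labels():
    import pandas as pd
    import altair as alt
    data = pd.DataFrame({"x": [1, 2, 3], "y": [0.123, 0.456, 0.789]})
    # Draw each y value as a two-decimal label slightly above its position.
    return alt.Chart(data).mark_text(dy=-8).encode(
        x="x:Q",
        y="y:Q",
        text=alt.Text("y", type="quantitative", format=".2f"),
    )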
class TextValue(ValueChannelMixin, core.TextValueDefWithCondition):
"""TextValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef>
Attributes
----------
condition : anyOf(:class:`ConditionalTextFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(float, string, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "text"
def __init__(self, value, condition=Undefined, **kwds):
super(TextValue, self).__init__(value=value, condition=condition, **kwds)
class Tooltip(FieldChannelMixin, core.TextFieldDefWithCondition):
"""Tooltip schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with a selection predicate.
**Note:** A field definition's ``condition`` property can only contain `value
definitions <https://vega.github.io/vega-lite/docs/encoding.html#value-def>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
format : string
The `formatting pattern <https://vega.github.io/vega-lite/docs/format.html>`__ for a
text field. If not defined, this will be determined automatically.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
property in the config <https://vega.github.io/vega-lite/docs/config.html>`__ or the
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "tooltip"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, format=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined,
**kwds):
super(Tooltip, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, format=format,
timeUnit=timeUnit, title=title, type=type, **kwds)
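# ---------------------------------------------------------------------------
# Editor's usage sketch (not generated from the Vega-Lite schema): a minimal,
# hypothetical example of the Tooltip channel, retitling and formatting one of
# the hovered fields. Data and field names are invented.
def _example_tooltips():
    import pandas as pd
    import altair as alt
    data = pd.DataFrame({"item": ["a", "b"], "price": [9.99, 24.5]})
    return alt.Chart(data).mark_bar().encode(
        x="item:N",
        y="price:Q",
        tooltip=[
            alt.Tooltip("item", type="nominal"),
            alt.Tooltip("price", type="quantitative", title="Price (USD)", format="$.2f"),
        ],
    )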
class TooltipValue(ValueChannelMixin, core.TextValueDefWithCondition):
"""TooltipValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef>
Attributes
----------
condition : anyOf(:class:`ConditionalTextFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(float, string, boolean)
A constant value in visual domain.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "tooltip"
def __init__(self, value, condition=Undefined, **kwds):
super(TooltipValue, self).__init__(value=value, condition=condition, **kwds)
class X(FieldChannelMixin, core.PositionFieldDef):
"""X schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
axis : anyOf(:class:`Axis`, None)
An object defining properties of axis's gridlines, ticks and labels.
If ``null``, the axis for the encoding channel will be removed.
**Default value:** If undefined, default `axis properties
<https://vega.github.io/vega-lite/docs/axis.html>`__ are applied.
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
Javascript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
stack : anyOf(:class:`StackOffset`, None)
Type of stacking offset if the field should be stacked.
``stack`` is only applicable for ``x`` and ``y`` channels with continuous domains.
For example, ``stack`` of ``y`` can be used to customize stacking for a vertical bar
chart.
``stack`` can be one of the following values:
* ``"zero"`` - stacking with baseline offset at zero value of the scale (for creating
typical stacked `bar <https://vega.github.io/vega-lite/docs/stack.html#bar>`__ and
`area <https://vega.github.io/vega-lite/docs/stack.html#area>`__ chart).
* ``"normalize"`` - stacking with normalized domain (for creating `normalized
stacked bar and area charts
<https://vega.github.io/vega-lite/docs/stack.html#normalized>`__ ).
* ``"center"`` - stacking with center baseline (for `streamgraph
<https://vega.github.io/vega-lite/docs/stack.html#streamgraph>`__ ).
* ``null`` - No-stacking. This will produce layered `bar
<https://vega.github.io/vega-lite/docs/stack.html#layered-bar-chart>`__ and area
chart.
**Default value:** ``zero`` for plots with all of the following conditions are true:
(1) the mark is ``bar`` or ``area`` ;
(2) the stacked measure channel (x or y) has a linear scale;
(3) At least one of non-position channels mapped to an unaggregated field that is
different from x and y. Otherwise, ``null`` by default.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
property in the config <https://vega.github.io/vega-lite/docs/config.html>`__ or the
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "x"
def __init__(self, shorthand=Undefined, aggregate=Undefined, axis=Undefined, bin=Undefined,
field=Undefined, scale=Undefined, sort=Undefined, stack=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(X, self).__init__(shorthand=shorthand, aggregate=aggregate, axis=axis, bin=bin,
field=field, scale=scale, sort=sort, stack=stack, timeUnit=timeUnit,
title=title, type=type, **kwds)
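# ---------------------------------------------------------------------------
# Editor's usage sketch (not generated from the Vega-Lite schema): a minimal,
# hypothetical histogram showing the X channel's ``bin`` and ``axis``
# properties documented above. Data and field names are invented.
def _example_binned_x():
    import pandas as pd
    import altair as alt
    data = pd.DataFrame({"measurement": [1.2, 2.4, 2.5, 3.1, 4.8, 5.0]})
    return alt.Chart(data).mark_bar().encode(
        x=alt.X("measurement", type="quantitative", bin=alt.Bin(maxbins=10),
                axis=alt.Axis(title="Measurement (binned)")),
        y="count()",
    )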
class XValue(ValueChannelMixin, core.ValueDef):
"""XValue schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : anyOf(float, string, boolean)
A constant value in visual domain (e.g., ``"red"`` / "#0099ff" for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "x"
def __init__(self, value, **kwds):
super(XValue, self).__init__(value=value, **kwds)
class X2(FieldChannelMixin, core.FieldDef):
"""X2 schema wrapper
Mapping(required=[shorthand])
Definition object for a data field, its type and transformation of an encoding channel.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
property in the config <https://vega.github.io/vega-lite/docs/config.html>`__ or the
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "x2"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(X2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
class X2Value(ValueChannelMixin, core.ValueDef):
"""X2Value schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : anyOf(float, string, boolean)
A constant value in visual domain (e.g., ``"red"`` / "#0099ff" for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "x2"
def __init__(self, value, **kwds):
super(X2Value, self).__init__(value=value, **kwds)
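# ---------------------------------------------------------------------------
# Editor's usage sketch (not generated from the Vega-Lite schema): a minimal,
# hypothetical ranged (Gantt-style) bar chart where ``x`` marks the start of
# each task and ``x2`` its end. Data and field names are invented.
def _example_ranged_bars():
    import pandas as pd
    import altair as alt
    data = pd.DataFrame({"task": ["A", "B"], "start": [1, 4], "end": [3, 8]})
    return alt.Chart(data).mark_bar().encode(
        x=alt.X("start", type="quantitative"),
        x2=alt.X2("end"),
        y=alt.Y("task", type="nominal"),
    )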
class Y(FieldChannelMixin, core.PositionFieldDef):
"""Y schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
axis : anyOf(:class:`Axis`, None)
An object defining properties of axis's gridlines, ticks and labels.
If ``null``, the axis for the encoding channel will be removed.
**Default value:** If undefined, default `axis properties
<https://vega.github.io/vega-lite/docs/axis.html>`__ are applied.
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
Javascript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
stack : anyOf(:class:`StackOffset`, None)
Type of stacking offset if the field should be stacked.
``stack`` is only applicable for ``x`` and ``y`` channels with continuous domains.
For example, ``stack`` of ``y`` can be used to customize stacking for a vertical bar
chart.
``stack`` can be one of the following values:
* ``"zero"`` - stacking with baseline offset at zero value of the scale (for creating
typical stacked `bar <https://vega.github.io/vega-lite/docs/stack.html#bar>`__ and
`area <https://vega.github.io/vega-lite/docs/stack.html#area>`__ chart).
* ``"normalize"`` - stacking with normalized domain (for creating `normalized
stacked bar and area charts
<https://vega.github.io/vega-lite/docs/stack.html#normalized>`__ ).
* ``"center"`` - stacking with center baseline (for `streamgraph
<https://vega.github.io/vega-lite/docs/stack.html#streamgraph>`__ ).
* ``null`` - No-stacking. This will produce layered `bar
<https://vega.github.io/vega-lite/docs/stack.html#layered-bar-chart>`__ and area
chart.
**Default value:** ``zero`` for plots with all of the following conditions are true:
(1) the mark is ``bar`` or ``area`` ;
(2) the stacked measure channel (x or y) has a linear scale;
(3) At least one of non-position channels mapped to an unaggregated field that is
different from x and y. Otherwise, ``null`` by default.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
property in the config <https://vega.github.io/vega-lite/docs/config.html>`__ or the
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "y"
def __init__(self, shorthand=Undefined, aggregate=Undefined, axis=Undefined, bin=Undefined,
field=Undefined, scale=Undefined, sort=Undefined, stack=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Y, self).__init__(shorthand=shorthand, aggregate=aggregate, axis=axis, bin=bin,
field=field, scale=scale, sort=sort, stack=stack, timeUnit=timeUnit,
title=title, type=type, **kwds)
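# ---------------------------------------------------------------------------
# Editor's usage sketch (not generated from the Vega-Lite schema): a minimal,
# hypothetical normalized stacked bar chart using the Y channel's ``stack``
# property documented above. Data and field names are invented.
def _example_normalized_stack():
    import pandas as pd
    import altair as alt
    data = pd.DataFrame({"year": [2018, 2018, 2019, 2019],
                         "kind": ["a", "b", "a", "b"],
                         "value": [3, 5, 4, 2]})
    return alt.Chart(data).mark_bar().encode(
        x="year:O",
        y=alt.Y("value", type="quantitative", stack="normalize"),
        color="kind:N",
    )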
class YValue(ValueChannelMixin, core.ValueDef):
"""YValue schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : anyOf(float, string, boolean)
A constant value in visual domain (e.g., ``"red"`` / "#0099ff" for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "y"
def __init__(self, value, **kwds):
super(YValue, self).__init__(value=value, **kwds)
class Y2(FieldChannelMixin, core.FieldDef):
"""Y2 schema wrapper
Mapping(required=[shorthand])
Definition object for a data field, its type and transformation of an encoding channel.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
bin : anyOf(boolean, :class:`BinParams`)
A flag for binning a ``quantitative`` field, or `an object defining binning
parameters <https://vega.github.io/vega-lite/docs/bin.html#params>`__.
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
**Default value:** ``false``
field : anyOf(string, :class:`RepeatRef`)
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**Note:** Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
**Note:** ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field,
or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
property in the config <https://vega.github.io/vega-lite/docs/config.html>`__ or the
`fieldTitle function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "y2"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Y2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
class Y2Value(ValueChannelMixin, core.ValueDef):
"""Y2Value schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : anyOf(float, string, boolean)
A constant value in visual domain (e.g., ``"red"`` / "#0099ff" for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "y2"
def __init__(self, value, **kwds):
super(Y2Value, self).__init__(value=value, **kwds)
|
bsd-3-clause
|
machinebrains/neat-python
|
examples/bnp_competition/scripts/bnp_model.py
|
1
|
6289
|
#!/usr/bin/python
###################################################################################################################
### This code is developed by HighEnergyDataScientests Team.
### Do not copy or modify without written approval from one of the team members.
###################################################################################################################
import pandas as pd
import numpy as np
import xgboost as xgb
import operator
#from __future__ import print_function
import math
from neatsociety import nn, parallel, population, visualize
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import Imputer
import sklearn.metrics
import matplotlib
matplotlib.use("Agg")  # Needed to save figures; must be set before importing pyplot
import matplotlib.pyplot as plt
import time
import os
### Controlling Parameters
output_col_name = "target"
test_col_name = "PredictedProb"
enable_feature_analysis = 1
id_col_name = "ID"
num_iterations = 5
### Creating output folders
if not os.path.isdir("../predictions"):
os.mkdir("../predictions")
if not os.path.isdir("../intermediate_data"):
os.mkdir("../intermediate_data")
if not os.path.isdir("../saved_states"):
os.mkdir("../saved_states")
def create_feature_map(features, featureMapFile):
outfile = open(featureMapFile, 'w')
for i, feat in enumerate(features):
outfile.write('{0}\t{1}\tq\n'.format(i, feat))
outfile.close()
def fitness(genome):
net = nn.create_feed_forward_phenotype(genome)
output = net.array_activate(X_train[features].values)
logloss_error = sklearn.metrics.log_loss(y_train, output[:,0])
return 1.0 - logloss_error
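# Editor's sketch (not part of the original pipeline): shows how the fitness
# above rewards calibrated probabilities -- confident, correct outputs give a
# small log-loss and hence a fitness near 1. The toy labels are invented.
def _fitness_intuition_demo():
    y_true = np.array([1, 0, 1, 1, 0])
    good = np.array([0.9, 0.1, 0.8, 0.95, 0.2])   # confident and mostly correct
    poor = np.array([0.5, 0.5, 0.5, 0.5, 0.5])    # uninformative predictions
    return (1.0 - sklearn.metrics.log_loss(y_true, good),
            1.0 - sklearn.metrics.log_loss(y_true, poor))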
def train_model(features,num_generations):
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## Time Stamp ==== " + timestamp)
t0 = time.time()
print("## Train a NEAT model")
timestr = time.strftime("%Y%m%d-%H%M%S")
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'bnp_config')
# Use a pool of four workers to evaluate fitness in parallel.
pe = parallel.ParallelEvaluator(fitness,3,progress_bar=True,verbose=1)
pop = population.Population(config_path)
pop.run(pe.evaluate, num_generations)
print("total evolution time {0:.3f} sec".format((time.time() - t0)))
print("time per generation {0:.3f} sec".format(((time.time() - t0) / pop.generation)))
print('Number of evaluations: {0:d}'.format(pop.total_evaluations))
# Verify network output against training data.
print("## Test against verification data.")
winner = pop.statistics.best_genome()
net = nn.create_feed_forward_phenotype(winner)
p_train = net.array_activate(X_train[features].values)
p_valid = net.array_activate(X_valid[features].values)
score_train = sklearn.metrics.log_loss(y_train, p_train[:,0])
score_valid = sklearn.metrics.log_loss(y_valid, p_valid[:,0])
print("Score based on training data set = ", score_train)
print("Score based on validation data set = ", score_valid)
# Visualize the winner network and plot statistics.
visualize.plot_stats(pop.statistics)
visualize.plot_species(pop.statistics)
visualize.draw_net(winner, view=True)
print("## Predicting test data")
preds = net.array_activate(test[features].values)
test[test_col_name] = preds[:, 0]  # keep the single output column, matching the [:,0] indexing used above
test[[id_col_name,test_col_name]].to_csv("../predictions/pred_" + timestr + ".csv", index=False)
if __name__ == '__main__':
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## Start Time Stamp ==== " + timestamp)
print("## Loading Data")
models_predictions_file = "../predictions/models_predictions.csv"
train = pd.read_csv('../inputs/train.csv')
test = pd.read_csv('../inputs/test.csv')
if os.path.isfile(models_predictions_file):
models_predictions = pd.read_csv(models_predictions_file)
else:
models_predictions = pd.DataFrame()
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## Time Stamp ==== " + timestamp)
print("## Data Processing")
train = train.drop(id_col_name, axis=1)
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## Time Stamp ==== " + timestamp)
print("## Data Encoding")
for f in train.columns:
if train[f].dtype=='object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
features = [s for s in train.columns.ravel().tolist() if s != output_col_name]
print("Features: ", features)
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(train[features])
train[features] = imp.transform(train[features])
test[features] = imp.transform(test[features])
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## Time Stamp ==== " + timestamp)
print("## Training")
numPos = len(train[train[output_col_name] == 1])
numNeg = len(train[train[output_col_name] == 0])
scaleRatio = float(numNeg) / float(numPos)
print("Number of positive " + str(numPos) + " , Number of negative " + str(numNeg) + " , Ratio Negative to Positive : " + str(scaleRatio))
test_size = 0.05
X_pos = train[train[output_col_name] == 1]
X_neg = train[train[output_col_name] == 0]
X_train_pos, X_valid_pos = train_test_split(X_pos, test_size=test_size)
X_train_neg, X_valid_neg = train_test_split(X_neg, test_size=test_size)
X_train = pd.concat([X_train_pos,X_train_neg])
X_valid = pd.concat([X_valid_pos,X_valid_neg])
X_train = X_train.iloc[np.random.permutation(len(X_train))]
X_valid = X_valid.iloc[np.random.permutation(len(X_valid))]
y_train = X_train[output_col_name]
y_valid = X_valid[output_col_name]
num_generations = 1000
train_model(features,num_generations)
timestamp = time.strftime("%Y%m%d-%H%M%S")
print("########################## End Time Stamp ==== " + timestamp)
|
bsd-3-clause
|
gnina/scripts
|
affinity_search/getres.py
|
1
|
1061
|
#!/usr/bin/env python
'''Return the top and R statistics for every row of the database that has them'''
import sys, re, MySQLdb, argparse, os, json, subprocess
import pandas as pd
import makemodel
import numpy as np
def getcursor():
'''create a connection and return a cursor;
doing this guards against dropped connections'''
conn = MySQLdb.connect (host = args.host,user = "opter",passwd=args.password,db=args.db)
conn.autocommit(True)
cursor = conn.cursor()
return cursor
parser = argparse.ArgumentParser(description='Return top and R statistics for successful rows in database')
parser.add_argument('--host',type=str,help='Database host',required=True)
parser.add_argument('-p','--password',type=str,help='Database password',required=True)
parser.add_argument('--db',type=str,help='Database name',default='opt1')
args = parser.parse_args()
cursor = getcursor()
cursor.execute('SELECT serial,top,R,auc,rmse FROM params WHERE rmse IS NOT NULL')
rows = cursor.fetchall()
for row in rows:
print('%d %f %f %f %f' % row)
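# Editor's sketch (not part of the original script): pandas is imported above
# but unused; a hypothetical helper like this could collect the fetched rows
# into a DataFrame instead of printing them.
def rows_to_frame(rows):
    return pd.DataFrame(list(rows), columns=['serial', 'top', 'R', 'auc', 'rmse'])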
|
bsd-3-clause
|
sinhrks/expandas
|
pandas_ml/misc/test/test_patsy.py
|
2
|
4149
|
#!/usr/bin/env python
import pandas as pd
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestModelFrame(tm.TestCase):
def test_patsy_matrices(self):
df = pd.DataFrame({'A': [1, 2, 3],
'B': [4, 5, 6],
'C': [7, 8, 9]},
index=['a', 'b', 'c'],
columns=['A', 'B', 'C'])
s = pd.Series([10, 11, 12], index=['a', 'b', 'c'])
mdf = pdml.ModelFrame(df, target=s)
result = mdf.transform('A ~ B + C')
self.assertIsInstance(result, pdml.ModelFrame)
self.assertEqual(result.shape, (3, 4))
tm.assert_index_equal(result.index, pd.Index(['a', 'b', 'c']))
tm.assert_index_equal(result.columns, pd.Index(['A', 'Intercept', 'B', 'C']))
expected = pd.DataFrame({'A': [1, 2, 3],
'Intercept': [1, 1, 1],
'B': [4, 5, 6],
'C': [7, 8, 9]},
index=['a', 'b', 'c'],
columns=['A', 'Intercept', 'B', 'C'],
dtype=float)
tm.assert_frame_equal(result, expected)
expected = pd.Series([1, 2, 3], index=['a', 'b', 'c'], name='A', dtype=float)
tm.assert_series_equal(result.target, expected)
self.assertEqual(result.target.name, 'A')
self.assertEqual(result.target_name, 'A')
def test_patsy_matrix(self):
df = pd.DataFrame({'A': [1, 2, 3],
'B': [4, 5, 6],
'C': [7, 8, 9]},
index=['a', 'b', 'c'],
columns=['A', 'B', 'C'])
s = pd.Series([10, 11, 12], index=['a', 'b', 'c'])
mdf = pdml.ModelFrame(df, target=s)
result = mdf.transform('B + C')
self.assertIsInstance(result, pdml.ModelFrame)
self.assertEqual(result.shape, (3, 3))
tm.assert_index_equal(result.index, pd.Index(['a', 'b', 'c']))
tm.assert_index_equal(result.columns, pd.Index(['Intercept', 'B', 'C']))
expected = pd.DataFrame({'Intercept': [1, 1, 1],
'B': [4, 5, 6],
'C': [7, 8, 9]},
index=['a', 'b', 'c'],
columns=['Intercept', 'B', 'C'],
dtype=float)
tm.assert_frame_equal(result, expected)
self.assertFalse(result.has_target())
self.assertEqual(result.target_name, '.target')
def test_patsy_deviation_coding(self):
df = pdml.ModelFrame({'X': [1, 2, 3, 4, 5], 'Y': [1, 3, 2, 2, 1],
'Z': [1, 1, 1, 2, 2]}, target='Z',
index=['a', 'b', 'c', 'd', 'e'])
result = df.transform('C(X, Sum)')
expected = pd.DataFrame({'Intercept': [1, 1, 1, 1, 1],
'C(X, Sum)[S.1]': [1, 0, 0, 0, -1],
'C(X, Sum)[S.2]': [0, 1, 0, 0, -1],
'C(X, Sum)[S.3]': [0, 0, 1, 0, -1],
'C(X, Sum)[S.4]': [0, 0, 0, 1, -1]},
index=['a', 'b', 'c', 'd', 'e'],
columns=['Intercept', 'C(X, Sum)[S.1]', 'C(X, Sum)[S.2]',
'C(X, Sum)[S.3]', 'C(X, Sum)[S.4]'],
dtype=float)
tm.assert_frame_equal(result, expected)
result = df.transform('C(Y, Sum)')
expected = pd.DataFrame({'Intercept': [1, 1, 1, 1, 1],
'C(Y, Sum)[S.1]': [1, -1, 0, 0, 1],
'C(Y, Sum)[S.2]': [0, -1, 1, 1, 0]},
index=['a', 'b', 'c', 'd', 'e'],
columns=['Intercept', 'C(Y, Sum)[S.1]', 'C(Y, Sum)[S.2]'],
dtype=float)
tm.assert_frame_equal(result, expected)
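# Editor's sketch (not part of the original test suite): pandas_ml's formula
# transform is assumed here to delegate to patsy; the function below shows the
# raw patsy call that yields the same Intercept/B/C design matrix for A ~ B + C.
def _patsy_reference_example():
    import patsy
    df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': [7, 8, 9]},
                      index=['a', 'b', 'c'])
    y, X = patsy.dmatrices('A ~ B + C', data=df, return_type='dataframe')
    return y, X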
|
bsd-3-clause
|
Gleland/SpectralAnalysis
|
updatingGTanalysis.py
|
4
|
17361
|
##############################################################################
# Created by Garrett Thompson
# Graphical User Interface for Data Analysis
# Created at Northern Arizona University
# for use in the Astrophysical Ice Laboratory
# Advisors: Jennifer Hanley, Will Grundy, Henry Roe
# [email protected]
##############################################################################
import os
import csv
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit as cf
from scipy.fftpack import fft, fftfreq, ifft
from scipy.signal import savgol_filter as sgf
from scipy.integrate import trapz
def main():
folder_to_save = choose_dir()
#choose files for analysis
raw_x,raw_y, raw_xbg,raw_ybg = choose_files(folder_to_save)
print("Plotting imported data...")
plotting_data_for_inspection(raw_x,raw_y,'Raw Data','Wavenumber (cm-1)','% Transmittance','rawspectrum.pdf',folder_to_save, False)
plotting_data_for_inspection(raw_xbg,raw_ybg,'Raw Background','Wavenumber (cm-1)','% Transmittance','rawbackground.pdf',folder_to_save, False)
#user chooses method after inspecting plots
user_method = str(input('Press "s" for savitsky-golay filter, or "f" for fft filter\n:'))
choosing = True
while choosing:
if user_method.lower() == 's':
# savitsky-golay option was chosen
choosing = False
args_list = [folder_to_save, raw_y, raw_ybg, raw_x]
raw_x, norm_smooth = sgf_calc(args_list)
plot_data(raw_x,norm_smooth,folder_to_save)
elif user_method.lower() == 'f':
# fft option was chosen
choosing = False
frq_x,frq_xbg,fft_y,fft_ybg = fft_calculation(raw_x,raw_y,raw_xbg,raw_ybg,folder_to_save)
plot_figure, plot_axis = plotting_data_for_inspection(frq_x,np.log(abs(fft_ybg)),'FFT of raw bg','Cycles/Wavenumber (cm)','Log(Power/Frequency)','fft_background.pdf',folder_to_save, False)
filt_y = fft_y.copy()
filt_ybg = fft_ybg.copy()
input('Zoom to liking, then press enter to start')
print('Left to add, middle to remove nearest, and right to finish')
# global frq_cid
vert_lines=[]
frq_cid = plot_figure.canvas.mpl_connect('button_press_event',lambda event: freq_click(event, [frq_x,fft_ybg,plot_figure,plot_axis,vert_lines,filt_y,filt_ybg,folder_to_save,raw_x]))
plt.show()
plot_figure.canvas.mpl_disconnect(frq_cid)
# vert_lines, frq_x, filt_y, filt_ybg = args_dict["vert_lines"],args_dict["frq_x"],args_dict["filt_y"],args_dict["filt_ybg"]
def save_as_csv(folder_to_save,title, column1_title,column2_title,column1_data,column2_data):
os.chdir(folder_to_save)
with open(title,"w") as f:
writer = csv.writer(f)
writer.writerow([column1_title,column2_title])
writer.writerows(list(zip(column1_data,column2_data)))
os.chdir('..')
def fft_calculation(raw_x,raw_y,raw_xbg,raw_ybg,folder_to_save):
""" calculates FFT of data for use in nipping unwanted frequencies"""
# finds FFT of ydata
fft_y = fft(raw_y)
fft_ybg = fft(raw_ybg)
# gets frequencies for FFT of data from array, and sample spacing
frq_x = fftfreq(len(fft_y),((max(raw_x)-min(raw_x))/len(fft_y)))
frq_xbg = fftfreq(len(fft_ybg),((max(raw_xbg)-min(raw_xbg))/len(fft_ybg)))
save_as_csv(folder_to_save,"FFT_Raw_bg_data.csv","frq_x","log(abs(fft_bg))",frq_x,np.log(abs(fft_ybg)))
return frq_x, frq_xbg, fft_y, fft_ybg
def choose_dir():
"""
User chooses where all work will be saved and
time stamp is created for future reference
"""
# Where all work to follow will be saved
folder_to_save = input('Type name of directory to save all data being created\n:')
# make and change to directory named by user
os.mkdir(folder_to_save)
os.chdir(folder_to_save)
# recording date and time that program is run, saving it to folder
with open("time_created.txt", "w") as text_file:
text_file.write("Time this program was run: {} \n".format(time.strftime("%Y-%m-%d %H:%M")))
os.chdir('..')
return folder_to_save
def plotting_data_for_inspection(xdata,ydata,plot_title,plot_xlabel,plot_ylabel,filename_for_saving,folder_to_save, block_boolean):
"""
Plots data for user to look at within program
parameters
----------
xdata,ydata: x and y data to be plotted
plot_xlabel,plot_ylabel: label x and y axes in plot
file_name_for_saving: string given for saving file for later referece
block_boolean: True or False, tells if program waits for figure to close
"""
plot_figure, plot_axis = plt.subplots()
plt.plot(xdata,ydata,color='blue')
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
plt.suptitle(plot_title)
plt.show(block=block_boolean)
os.chdir(folder_to_save)
plt.savefig(filename_for_saving)
os.chdir('..')
return plot_figure, plot_axis
def choose_files(folder_to_save):
"""
Lets user determine which files will be imported for analysis
and saves preferences for reference later on
"""
raw_import = str(input('Enter a raw dataset for analysis\n:'))
print("\nGot it! Importing now... \n")
raw_x,raw_y = import_data(raw_import)
bg_import = str(input('Enter a raw background for analysis\n:'))
print("\nGot it! Importing now... \n")
raw_xbg,raw_ybg = import_data(bg_import)
os.chdir(folder_to_save)
with open("data_files_used.txt", "w") as text_file:
text_file.write("Raw data file used: {} \n".format(raw_import))
text_file.write("Raw background data file used: {}".format(bg_import))
concentration = str(input('Enter concentration of mixture\n:'))
# saving text file of concentration for later use in plotting
with open("concentration.txt","w") as f:
f.write(concentration)
temperature = str(input('Enter temperature of mixture\n:'))
# saving text file of temperature for later use in plotting
with open("temperature.txt","w") as f:
f.write(temperature)
os.chdir('..')
return raw_x, raw_y,raw_xbg,raw_ybg
# assumes a csv file, as all data stored from ice lab is in CSV format
def import_data(filename):
raw_data = np.loadtxt(open(filename,"rb"),delimiter=",")
xdat = raw_data[:,0]
ydat = raw_data[:,1]
return xdat,ydat
def freq_click(event, args_list):
# if button_click = left: add left line
# if button_click = middle: removes closest line
# if button_lick = right: finish
# add clicked data points to list
frq_x,fft_ybg,plot_figure,plot_axis,vert_lines, filt_y, filt_ybg,folder_to_save, raw_x = args_list
plt.xlim(plt.gca().get_xlim())
plt.ylim(plt.gca().get_ylim())
if event.button==1:
vert_lines.append(event.xdata)
plot_axis.plot(frq_x,np.log(np.abs(fft_ybg)),color='blue')
#plt.axvline(x=vert_lines[-1],color='black')
for val in vert_lines:
plt.axvline(x=val,color='black')
plt.xlabel('Cycles/Wavenumber')
plt.ylabel('Relative Intensity')
# draws points as they are added
plt.draw()
if event.button==2:
# middle click, remove closest vertical line
print ('pop!')
# gets x,y limits of graph,saves them before destroying figure
xlims = plt.gca().get_xlim()
ylims = plt.gca().get_ylim()
# clears axes, to get rid of old scatter points
plot_axis.cla()
# re-plots spectrum
plot_axis.plot(frq_x,np.log(np.abs(fft_ybg)),color='blue')
# sets axes limits to original values
plt.xlim(xlims)
plt.ylim(ylims)
plt.xlabel('Cycles/Wavenumber')
plt.ylabel('Relative Intensity')
# deletes point closest to mouse click
xindx = np.abs(np.asarray(vert_lines) - event.xdata).argmin()
del vert_lines[xindx]
for line in vert_lines:
plt.axvline(x=line,color='black')
# draws the new set of vertical lines
plt.draw()
if event.button==3:
# right click, ends clicking awareness
# plot_figure.canvas.mpl_disconnect(frq_cid)
os.chdir(folder_to_save)
plt.savefig('FFT_filter.pdf')
with open("freq_window.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(["Xposition of vert. line"])
writer.writerows(list(zip(vert_lines)))
os.chdir('..')
# first window
args_dict ={"vert_lines":vert_lines,"frq_x":frq_x,"filt_y":filt_y,"filt_ybg":filt_ybg}
plt.close("all")
argslist = [vert_lines,frq_x,filt_y,filt_ybg]
filt_y,filt_ybg = window_filter(argslist)
fft_calc(filt_y, filt_ybg, raw_x,folder_to_save)
def fft_calc(filt_y, filt_ybg, raw_x,folder_to_save):
# dividing filtered y data from filtered bg data
norm_fft = ifft(filt_y)/ifft(filt_ybg)
save_as_csv(folder_to_save,"fft_data.csv","raw_x","fft_filt",raw_x,norm_fft.real)
plot_data(raw_x,norm_fft.real,folder_to_save)
def sgf_calc(args_list):
folder_to_save, raw_y, raw_ybg, raw_x = args_list
# warning when using sgf option
warnings.filterwarnings(action="ignore", module="scipy",message="^internal gelsd")
window_param = int(input('Input window box size (must be odd number)\n:'))
poly_param = int(input('Input polynomial order for smoothing\n:'))
# saving parameters chosen for future inspection
os.chdir(folder_to_save)
with open("sgf_params.txt", "w") as sgf_file:
sgf_file.write("Window parameter used: {} \n".format(window_param))
sgf_file.write("Polynomial paramter used: {}".format(poly_param))
#global norm_smooth
smoothed_y = sgf(raw_y,window_param,poly_param,delta=(abs(raw_y)[1]-raw_y)[0])
smoothed_ybg =sgf(raw_ybg,window_param,poly_param,delta=(abs(raw_ybg)[1]-raw_ybg)[0])
# dividing filtered y data from filtered bg data
norm_smooth = smoothed_y / smoothed_ybg
rows = list(zip(raw_x,norm_smooth))
with open("sgf_data.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(["window","polynomial order"])
writer.writerow([window_param,poly_param])
writer.writerow(["raw_x","sgf_filt"])
writer.writerows(rows)
os.chdir('..')
return raw_x,norm_smooth
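# Editor's sketch (not part of the original analysis): a self-contained
# illustration of the Savitzky-Golay smoothing used above on a noisy synthetic
# signal. The window/order values are arbitrary demo choices, not lab settings.
def _sgf_demo(window_param=11, poly_param=3):
    x_demo = np.linspace(0.0, 2.0 * np.pi, 200)
    noisy = np.sin(x_demo) + 0.1 * np.random.randn(x_demo.size)
    return sgf(noisy, window_param, poly_param)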
# range of frequenices to cut out
def window_filter(args_list):
vert_lines, frq_x, filt_y, filt_ybg = args_list
window_min, window_max= vert_lines[-2], vert_lines[-1]
for i in range(len(frq_x)):
if (frq_x[i] >= window_min and frq_x[i] <=window_max) or (frq_x[i]>-1*window_max and frq_x[i]<-1*window_min):
filt_y[i] = 0
filt_ybg[i] = 0
return filt_y,filt_ybg
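# Editor's sketch (not part of the original analysis): a self-contained
# illustration of the band-stop idea implemented above -- zero a symmetric
# frequency window of an FFT and invert it. The 50-70 Hz band is invented.
def _bandstop_demo():
    t = np.linspace(0.0, 1.0, 500, endpoint=False)
    signal = np.sin(2 * np.pi * 5 * t) + 0.5 * np.sin(2 * np.pi * 60 * t)
    spectrum = fft(signal)
    freqs = fftfreq(len(signal), t[1] - t[0])
    # suppress the unwanted band on both the positive and negative frequency axis
    spectrum[(np.abs(freqs) > 50) & (np.abs(freqs) < 70)] = 0
    return ifft(spectrum).real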
def plot_data(x,y,folder_to_save):
plot_figure,plot_axis = plotting_data_for_inspection(x,y,"Divide and Filtered Spectrum","Wavenumber cm-1","Relative Intensity","dv_filt_spectrum.pdf",folder_to_save, False)
order = int(input('Zoom to liking and then enter what order polynomial for continuum fit\n:'))
xcoords,ycoords = [],[]
# tells python to turn on awareness for button presses
global cid
cid = plot_figure.canvas.mpl_connect('button_press_event', lambda event: onclick(event, [xcoords,ycoords,plot_figure,plot_axis,order,folder_to_save,x,y]))
print('Left to add, middle to remove nearest, and right to finish')
plt.show()
# for creating continuum fit to divide out
def onclick(event,argslist):
xcoords,ycoords,plot_figure,plot_axis,order,folder_to_save,x,y = argslist
global pvals
if event.button==1:
# left click
plt.xlim(plt.gca().get_xlim())
plt.ylim(plt.gca().get_ylim())
#plt.cla()
try:
# only delete if curve_fit line already drawn
if len(plot_axis.lines) !=1: plot_axis.lines.remove(plot_axis.lines[-1])
except UnboundLocalError: pass
# add clicked data points to list
xcoords.append(event.xdata)
ycoords.append(event.ydata)
plot_axis.scatter(xcoords,ycoords,color='black')
plt.xlabel('Wavenumber cm-1')
plt.ylabel('Relative Intensity')
plt.draw()
xvals = np.array(xcoords)
yvals = np.array(ycoords)
# fits values to polynomial, rankwarning is irrelevant
warnings.simplefilter('ignore', np.RankWarning)
p_fit = np.polyfit(xvals,yvals,order)
pvals = np.poly1d(p_fit)
plot_axis.plot(x,pvals(x),color='black')
plt.draw()
# plt.show(block=False)
if event.button==2:
# middle click, remove closest point to click
print ('pop!')
# gets x,y limits of graph,saves them before destroying figure
xlims = plt.gca().get_xlim()
ylims = plt.gca().get_ylim()
# clears axes, to get rid of old scatter points
plot_axis.cla()
# re-plots spectrum
plot_axis.plot(x,y)
# sets axes limits to original values
plt.xlim(xlims)
plt.ylim(ylims)
plt.xlabel('Wavenumber cm-1')
plt.ylabel('Relative Intensity')
# deletes point closest to mouse click
xindx = np.abs(np.asarray(xcoords) - event.xdata).argmin()
del xcoords[xindx]
del ycoords[xindx]  # remove the matching y so the point lists stay paired
# draws the new set of scatter points, and colors them
plot_axis.scatter(xcoords,ycoords,color='black')
plt.draw()
xvals = np.array(xcoords)
yvals = np.array(ycoords)
# fits values to polynomial, rankwarning is ignored
warnings.simplefilter('ignore', np.RankWarning)
p_fit = np.polyfit(xvals,yvals,order)
pvals = np.poly1d(p_fit)
plot_axis.plot(x,pvals(x),color='black')
plt.draw()
if event.button==3:
# right click,ends clicking awareness
plot_figure.canvas.mpl_disconnect(cid)
os.chdir(folder_to_save)
plt.savefig('continuum_chosen.pdf')
# Saving polynomial eqn used in continuum divide for reference
with open("continuum_polynomial.txt", "w") as save_file:
save_file.write("%s *x^ %d " %(pvals[0],0))
for i in (range(len(pvals))):
save_file.write("+ %s *x^ %d " %(pvals[i+1],i+1))
os.chdir('..')
calc_coeffs(pvals,x,y,folder_to_save)
def calc_coeffs(pvals,x,y,folder_to_save):
fit_y = pvals(x)
# flattens the continuum
new_continuum = y / fit_y
thickness = int(input('\nEnter thickness of cell in cm\n:'))
# 2 cm thickness for our work in 2016
# remove runtime errors when taking negative log and dividing
err_settings = np.seterr(invalid='ignore')
alpha_coeffs = -np.log(new_continuum) / thickness
plotting_data_for_inspection(x,alpha_coeffs,"Alpha Coefficients","Wavenumber cm-1","Absorption cm-1","alpha_coeffs.pdf",folder_to_save,False)
save_as_csv(folder_to_save,"alpha_coeffs.csv","x","alpha",x,alpha_coeffs)
# creating masks around each peak
x_mask1 = x[(x>10000) & (x<10500)]
x_mask2 = x[(x>11200) & (x<12000)]
y_mask1 = alpha_coeffs[(x>10000) & (x<10500)]
y_mask2 = alpha_coeffs[(x>11200) & (x<12000)]
# writing data for plotting later
save_as_csv(folder_to_save,"10000_peak.csv","x","y",x_mask1,y_mask1)
save_as_csv(folder_to_save,"11200_peak.csv","x","y",x_mask2,y_mask2)
# integrated area calcs
area10000=trapz(y_mask1,x_mask1)
area11200=trapz(y_mask2,x_mask2)
os.chdir(folder_to_save)
with open("10000area.txt","w") as f:
f.write(str(area10000))
with open("11200area.txt","w") as f:
f.write(str(area11200))
os.chdir('..')
finish_prog = input("Press 'y' when finished\n:")
check = True
while check:
if (finish_prog =="y"): check = False
plt.close('all')
print("Finished!")
quit() # end of program
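# Editor's sketch (not part of the original analysis): a worked illustration of
# the Beer-Lambert step in calc_coeffs above, alpha = -ln(T)/d for
# transmittance T through a cell of thickness d, with the band area from
# trapezoidal integration. All numbers are invented.
def _absorption_demo(thickness_cm=2.0):
    wn = np.linspace(10000.0, 10500.0, 200)                 # wavenumbers, cm-1
    transmittance = 1.0 - 0.3 * np.exp(-((wn - 10250.0) / 60.0) ** 2)
    alpha = -np.log(transmittance) / thickness_cm           # absorption coefficient, cm-1
    return trapz(alpha, wn)                                  # integrated band area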
if __name__ == '__main__':
main()
|
mit
|
saskartt/P4UL
|
pyAnalyze/approachAnalysis.py
|
1
|
8459
|
#!/usr/bin/env python
import sys
import subprocess as sb
import numpy as np
import argparse
from utilities import filesFromList, writeLog
from plotTools import userLabels, extractFromCSV, addToPlot
import matplotlib.pyplot as plt
'''
Description:
Author: Mikko Auvinen
[email protected]
University of Helsinki &
Finnish Meteorological Institute
'''
#======== Function definitions =============================#
def p2pMaxMin( r ):
# Peak to peak max and min evaluation routine.
dr = (r[1:] - r[:-1])
fpos = (dr>=0.).astype(int)
fneg = (dr<0.).astype(int)
rp_cum = 0.; rn_cum = 0.
rp_max = 0.; rn_min = 0.
i = 0
for fp in fpos:
if( fp == 0 ):
if( rp_cum > rp_max ): rp_max = rp_cum
rp_cum = 0.
rp_cum += float(fp)*dr[i]; i+=1
#print('rp_cum[{}] = {} '.format(i,rp_cum))
i = 0
for fn in fneg:
if( fn == 0 ):
if( rn_cum < rn_min ): rn_min = rn_cum
rn_cum = 0.
rn_cum += float(fn)*dr[i]; i+=1
#print('rn_cum[{}] = {} '.format(i,rn_cum))
return rp_max, rn_min
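# Editor's sketch (not part of the original script): a tiny check of p2pMaxMin
# on an invented series. The largest monotone rise is 0 -> 4 and the largest
# monotone drop is 4 -> 1, so the expected return value is (4.0, -3.0).
def _p2pMaxMin_demo():
    r = np.array([0., 2., 4., 1., 3.])
    return p2pMaxMin(r)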
#==========================================================#
parser = argparse.ArgumentParser(prog='approachAnalysis.py')
parser.add_argument("strKey", help="Search string for collecting files.",nargs='?',\
default=".csv")
parser.add_argument("--magy", help="Magnitude of all variables.", action="store_true",\
default=False)
parser.add_argument("--yx", help="Reverse axes: plot(x,y) --> plot(y,x)", action="store_true",\
default=False)
parser.add_argument("--labels", help="User specified labels.", action="store_true",\
default=False)
parser.add_argument("--reuse", help="Reuse once specified variable selections.", action="store_true",\
default=False)
parser.add_argument("-v", "--var", help="Variable Name in CSV-file", type=str, nargs='+',\
default=['u','v','w'] )
parser.add_argument("-yl","--ylims", help="Y-axis limits: [min,max]. Default=[0,10]",\
type=float,nargs=2,default=[0.,10.])
parser.add_argument("-fn","--figName", help="Name of the (temporary) figures. (default=tmp)",\
type=str,default="tmp")
parser.add_argument("-fa","--fileAnim", help="Name of the animation file. (default=anim.gif)",\
type=str,default="anim.gif")
parser.add_argument("-na", "--noAnim", help="Do not make an animation.",\
action="store_true", default=False)
args = parser.parse_args()
writeLog( parser, args )
#==========================================================#
strKey = args.strKey
figName = args.figName
fileAnim = args.fileAnim
noAnimation = args.noAnim
ylims = args.ylims
varList = args.var
fileNos, fileList = filesFromList( "*"+strKey+"*" )
print(' The varList [-v, --var] option is overridden at this point. ')
print(' Reading coordinate values from file {} ...'.format( fileList[0]) )
coordList = [ 'arc_length', 'Points:0', 'Points:1', 'Points:2']
xv = extractFromCSV( fileList[0] , coordList )
s = xv[0].copy() # arc_length
x = xv[1].copy(); y = xv[2].copy(); z = xv[3].copy()
xv = None
print(' Done.\n')
# -------------------------------------------------------- #
print(' Computing the mean velocity values ... ')
varList = ['u', 'v', 'w']
Ux_mean = None; Uy_mean = None; Uz_mean = None
n = 0
for fn in fileNos:
n += 1
#pfig = pl.figure(num=1, figsize=(18.,9.))
tv = extractFromCSV( fileList[fn] , varList )
u = tv[0].copy(); v = tv[1].copy(); w = tv[2].copy()
tv = None
# The velocity data may contain nan entries, which should be replaced by 0.
u[np.isnan(u)] = 0.; v[np.isnan(v)] = 0.; w[np.isnan(w)] = 0.
# Accumulate sums for mean values.
if( Ux_mean == None ):
Ux_mean = np.zeros( u.shape ) # Initialize
Uy_mean = np.zeros( u.shape )
Uz_mean = np.zeros( u.shape )
Ux_mean += u; Uy_mean += v; Uz_mean += w
# Use the sums to compute the mean values.
Ux_mean /= float(n); Uy_mean /= float(n); Uz_mean /= float(n)
print(' Done.\n')
# -------------------------------------------------------- #
print(' Extract directional data from the approach line ... ')
#pfig = plotCSV( pfig, fileList[fn], args.yx, args.magy, args.reuse )
rad2deg = 180./np.pi
deg2rad = np.pi/180.
# Starting point: Rissala's approach line (from Paraview)
p1 = np.array([ x[0], y[0], z[0] ]) # np.array([6800., 1250., 0.])
p2 = np.array([ x[-1],y[-1],z[-1] ]) # np.array([7700., 650., 72.])
da = p2 - p1
da_mag = np.sqrt( np.sum( da**2 ) )
da_xy = np.sqrt( np.sum( da[0:2]**2))
# Approach direction (normal vector)
na = da/da_mag
# Sharp angle between the runway and the mean wind
theta = np.arccos( da[0]/da_xy )
print(' Sharp angle between the runway and the mean wind: theta = {} deg'.format( theta*rad2deg ))
print(' Done.\n')
# -------------------------------------------------------- #
# Hornet's approach speed and velocity
Uappr_mag = 69.
Ua = Uappr_mag*na
# Mean headwind
Uhw_mean = Ux_mean * np.cos( theta ) - Uy_mean * np.sin( theta )
# Speed relative to the ground ... perhaps not needed.
U_grd = Uappr_mag - Uhw_mean
# Approach angle
gamma = np.arctan( da[2]/da_xy )
# F18 Data:
rho = 1.2 # standard air
CL = 1.2 # at 7deg angle of attack
CLa = 2.86 # 1/rad (alpha in range [3deg, 10deg])
Aref=18.*3.
K = 0.5*rho*Aref
# Extract deviations in the headwind and compute the changes in AoA [alpha].
Lift = K*Uappr_mag**2*CL
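# Linearized lift sensitivities used below (with L = K*U^2*CL and K = 0.5*rho*Aref):
#   dL/dalpha = K*U^2*CLa   -> dL_a in the loop
#   dL/dU     = 2*K*U*CL    -> dL_u in the loop
# i.e. the angle-of-attack and headwind perturbations are treated as independent,
# first-order contributions to the change in lift.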
n = 0
dL_max = 0.
dL_sum = 0.
dL_mxv = 0. # Maximum variance.
dL_p2p_max = 0.
dL_p2p_min = 0.
for fn in fileNos:
n += 1
#pfig = pl.figure(num=1, figsize=(18.,9.))
tv = extractFromCSV( fileList[fn] , varList ) # NOTE: varList = ['u', 'v', 'w']
du = tv[0]-Ux_mean
dv = tv[1]-Uy_mean
dw = tv[2]-Uz_mean # Uz_mean could be replaced by 0.
tv = None
# The velocity data may contain nan entries, which should be replaced by 0.
du[np.isnan(du)] = 0.; dv[np.isnan(dv)] = 0.; dw[np.isnan(dw)] = 0.
dU_hw = du * np.cos( theta ) - dv * np.sin( theta )
dalpha = np.arctan( dw/Uappr_mag)
# Change in lift due to changes in AoA:
dL_a = K*Uappr_mag**2*CLa*dalpha
# Change in lift due to changes in head wind.
dL_u = 2.*K*CL*Uappr_mag*dU_hw
dLp_a = dL_a/Lift * 100. # In percentage
dLp_u = dL_u/Lift * 100.
dLp_mag= np.sqrt( (dLp_a+dLp_u)**2 )
#fig = plt.figure(num=1, figsize=(18,9))
fig, (ax1, ax2) = plt.subplots(num=1, nrows=2, sharex=True, figsize=(18,11))
lines11,=ax1.plot( s,dLp_a,'-o', linewidth=1.6 )
lines12,=ax1.plot( s,dLp_u,'-o', linewidth=1.6 )
ax1.legend( (lines11,lines12) , ('dL(alpha) [%]',' dL(u) [%]'), loc=1 )
ax1.set_ylim([-8., 8.])
ax1.set_xlim([ min(s) , 1.05*max(s)]) # s: arc_length
ax1.set_title(' Changes in Lift due to turbulence ', fontsize=22)
ax1.set_ylabel(' dL [%] ', fontsize=22); ax1.grid(True)
lines2,=ax2.plot(s,dLp_mag,'-ro', linewidth=1.6 )
ax2.legend( (lines2,) , (' ABS(SUM(dL)) [%]',), loc=1 )
ax2.set_xlim([ min(s) , 1.05*max(s)]) # s: arc_length
ax2.set_ylim([-1., 12.5]); ax2.set_xlim([ min(s) , max(s)])
ax2.set_xlabel(' Distance along approach line [m] ', fontsize=22 )
ax2.set_ylabel(' dL [%] ', fontsize=22 ); ax2.grid(True)
# Maximum variance
dL_ivar = np.var( dLp_mag[ du > 0 ] ) # Consider only points where du > 0.
if( dL_ivar > dL_mxv ): dL_mxv = dL_ivar
# Mean variance
dL_sum += dL_ivar
dL_var = dL_sum/float(n)
dL_imax = np.max(dLp_mag)
if( dL_imax > dL_max ): dL_max = dL_imax
dL_ip2p_mx, dL_ip2p_mn = p2pMaxMin( (dLp_a+dLp_u) )
if( dL_ip2p_mx > dL_p2p_max ): dL_p2p_max = dL_ip2p_mx
if( dL_ip2p_mn < dL_p2p_min ): dL_p2p_min = dL_ip2p_mn
infoStr =' Time = {:4d}s\n'.format((n-1)*2)
infoStr +=' Current P2P(dL) [max,min] = [{:4.1f}% , {:4.1f}%]\n'.format(dL_ip2p_mx, dL_ip2p_mn)
infoStr +=' Running P2P(dL) [max,min] = [{:4.1f}% , {:4.1f}%]\n'.format(dL_p2p_max, dL_p2p_min)
#infoStr +=' Max(dL) = {:4.1f}%\n'.format(dL_imax)
infoStr +=' Running Max(dL) = {:4.1f}%\n'.format(dL_max)
#infoStr +=' Var(dL) = {:4.1f}%\n'.format(dL_ivar)
infoStr +=' Running Mean(Var(dL)) = {:4.1f}%\n'.format(dL_var)
infoStr +=' Running Max(Var(dL)) = {:4.1f}%\n'.format(dL_mxv)
plt.text( 1. , 5.5, infoStr , fontsize=20)
figStr = '{}_{:04d}.jpg'.format(figName,n)
print(' Saving figure {} '.format(figStr))
fig.savefig(figStr)
ax1.cla(); ax2.cla(); fig.clf()
if( not noAnimation ):
cmd = 'convert {}_* {} '.format(figName,fileAnim)
print(' Executing command: ${}'.format(cmd))
sb.call(cmd, shell=True)
print(' All Done! ')
|
mit
|
dsavoiu/kafe2
|
examples/006_advanced_errors/02_error_components.py
|
1
|
4654
|
"""
Typically, the uncertainties of the measurement data are much more complex than in the examples
discussed so far. In most cases there are uncertainties in ordinate and abscissa, and in addition to
the independent uncertainties of each data point there are common, correlated uncertainties for all
of them.
With the method add_error() or add_matrix_error() uncertainties can be specified on the 'x' and 'y'
data, either in the form of independent or correlated, relative or absolute uncertainties of all or
groups of measured values or by specifying the complete covariance or correlation matrix. All
uncertainties specified in this way are included in the global covariance matrix for the fit.
As an example, we consider measurements of a cross section as a function of the energy near a
resonance. These are combined measurement data from the four experiments at CERN's LEP accelerator,
which were corrected for effects caused by photon radiation: Measurements of the hadronic cross
section (sigma) as a function of the centre-of-mass energy (E).
"""
from kafe2 import XYContainer, Fit, Plot, ContoursProfiler
import matplotlib.pyplot as plt
# Center-of-mass energy E (GeV):
E = [88.387, 89.437, 90.223, 91.238, 92.059, 93.004, 93.916] # x data
E_errors = [0.005, 0.0015, 0.005, 0.003, 0.005, 0.0015, 0.005] # Uncorrelated absolute x errors
ECor_abs = 0.0017 # Correlated absolute x error
# Hadronic cross section with photonic corrections applied (nb):
sig = [6.803, 13.965, 26.113, 41.364, 27.535, 13.362, 7.302] # y data
sig_errors = [0.036, 0.013, 0.075, 0.010, 0.088, 0.015, 0.045] # Uncorrelated absolute y errors
sigCor_rel = 0.0007 # Correlated relative y error
# Breit-Wigner with s-dependent width:
def BreitWigner(E, s0=41.0, M=91.2, G=2.5):
s = E*E
Msq = M*M
Gsq = G*G
return s0*s*Gsq/((s-Msq)*(s-Msq)+(s*s*Gsq/Msq))
BW_data = XYContainer(E, sig) # Create data container.
# Add errors to data container.
# By default errors are assumed to be absolute and uncorrelated.
# For errors that are relative and/or correlated you need to set the corresponding kwargs.
# Add independent errors:
error_name_E = BW_data.add_error(axis='x', name='deltaE', err_val=E_errors)
error_name_sig = BW_data.add_error(axis='y', name='deltaSig', err_val=sig_errors)
# Add fully correlated, absolute Energy errors:
error_name_ECor = BW_data.add_error(axis='x', name='Ecor', err_val=ECor_abs, correlation=1.)
# Add fully correlated, relative cross section errors:
error_name_sigCor = BW_data.add_error(
axis='y', name='sigCor', err_val=sigCor_rel, correlation=1., relative=True)
# Note: kafe2 methods that add errors return a name for the added error. If no name is specified
# a random alphanumeric string is assigned automatically. Further down we will use these names to
# enable/disable some of the errors.
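# Conceptually (a rough sketch, not the literal kafe2 internals): each independent
# error contributes a diagonal term err_i**2 to the covariance matrix, a fully
# correlated absolute error e adds e**2 to every matrix element, and a fully
# correlated relative error r adds (r*y_i)*(r*y_j) to element (i, j). For the
# y covariance above one could build the equivalent matrix by hand, e.g.
#   import numpy as np
#   y = np.asarray(sig)
#   cov_y = np.diag(np.asarray(sig_errors)**2) + np.outer(sigCor_rel*y, sigCor_rel*y)
# kafe2 assembles the corresponding global covariance matrix from the add_error() calls.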
# Assign labels for the data and the axes:
BW_data.label = 'QED-corrected hadronic cross-sections'
BW_data.axis_labels = ('CM Energy (GeV)', r'$\sigma_h$ (nb)')
# Note: Because kafe2 containers are copied when a Fit object is created from them assigning labels
# to the original XYContainer after the fit has already been created would NOT work.
BW_fit = Fit(BW_data, BreitWigner)
# Uncomment the following two lines to assign data labels after the fit has already been created:
# BW_fit.data_container.label = 'QED-corrected hadronic cross-sections'
# BW_fit.data_container.axis_labels = ('CM Energy (GeV)', r'$\sigma_h$ (nb)')
# Model labels always have to be assigned after the fit has been created:
BW_fit.model_label = 'Breit-Wigner with s-dependent width'
# Set LaTeX names for printout in info-box:
BW_fit.assign_parameter_latex_names(E='E', s0=r'\sigma^0', M=r'M_Z', G=r'\Gamma_Z')
BW_fit.assign_model_function_latex_name(r'\sigma^{\rm ew}_{e^+e^-\to{\rm hadrons}}')
BW_fit.assign_model_function_latex_expression(
r'{s0}\frac{{ {E}^2{G}^2}}{{({E}^2-{M}^2)^2+({E}^4{G}^2/{M}^2)}}')
# Do the fit:
BW_fit.do_fit()
# Print a report:
BW_fit.report()
# Plot the fit results:
BW_plot = Plot(BW_fit)
BW_plot.y_range = (0, 1.03*max(sig)) # Explicitly set y_range to start at 0.
BW_plot.plot()
# Create a contour plot:
ContoursProfiler(BW_fit).plot_profiles_contours_matrix(show_grid_for='contours')
# Investigate the effects of individual error components: disabling the correlated uncertainty on
# energy should decrease the uncertainty of the mass M but have little to no effect otherwise.
print('====== Disabling error component %s ======' % error_name_ECor)
print()
BW_fit.disable_error(error_name_ECor)
BW_fit.do_fit()
BW_fit.report(show_data=False, show_model=False)
plt.show()
|
gpl-3.0
|
minimumcut/UnnamedEngine
|
UnnamedEngine/Vendor/bullet/examples/pybullet/testrender.py
|
3
|
1357
|
import numpy as np
import matplotlib.pyplot as plt
import pybullet
pybullet.connect(pybullet.GUI)
pybullet.loadURDF("r2d2.urdf")
camTargetPos = [0.,0.,0.]
cameraUp = [0,0,1]
cameraPos = [1,1,1]
yaw = 40
pitch = 10.0
roll=0
upAxisIndex = 2
camDistance = 4
pixelWidth = 320
pixelHeight = 240
nearPlane = 0.01
farPlane = 1000
lightDirection = [0,1,0]
lightColor = [1,1,1]#optional argument
fov = 60
#img_arr = pybullet.renderImage(pixelWidth, pixelHeight)
#renderImage(w, h, view[16], projection[16])
#img_arr = pybullet.renderImage(pixelWidth, pixelHeight, cameraPos, camTargetPos, cameraUp, nearPlane, farPlane)
for pitch in range (0,360,10) :
viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
aspect = pixelWidth / float(pixelHeight)  # true division so the aspect ratio is not truncated under Python 2
projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
img_arr = pybullet.getCameraImage(pixelWidth, pixelHeight, viewMatrix,projectionMatrix, lightDirection,lightColor)
w=img_arr[0]
h=img_arr[1]
rgb=img_arr[2]
dep=img_arr[3]
#print 'width = %d height = %d' % (w,h)
# reshape creates np array
np_img_arr = np.reshape(rgb, (h, w, 4))
np_img_arr = np_img_arr*(1./255.)
#show
plt.imshow(np_img_arr,interpolation='none')
plt.pause(0.01)
pybullet.resetSimulation()
|
mit
|
pafcu/WereSim
|
plot.py
|
1
|
1521
|
import sys
import matplotlib.pylab as plt
import numpy as np
import os
import re
r = re.compile(r'(\d+)wolves_(\d+)healers_(\d+)seers_([0-9.]+)intuition\.txt')
caption = sys.argv[1]
outfile = sys.argv[2]
datacol = int(sys.argv[3])
files = sys.argv[4:]
plt.hold(True)
colors = ['red','green','blue','violet','orange','yellow','pink','lime','gray','black','turquoise','brown']
i=0
for path in files:
m = r.match(os.path.basename(path))
if m == None:
print "Invalid file name:", os.path.basename(path)
sys.exit(1)
if m.group(1) == '1':
wolftxt = 'wolf'
else:
wolftxt = 'wolves'
if m.group(2) == '1':
healertxt = 'healer'
else:
healertxt = 'healers'
if m.group(3) == '1':
seertxt = 'seer'
else:
seertxt = 'seers'
label = '%s %s, %s %s, %s %s, %s intuition'%(m.group(1),wolftxt,m.group(2),healertxt,m.group(3),seertxt, m.group(4))
print label
try:
data = np.loadtxt(path)
except IOError:
print "Bad data file: %s"%path
continue
plt.plot(data[:,0],data[:,datacol],label=label,color=colors[i])
plt.errorbar(data[:,0],data[:,datacol],yerr=data[:,datacol+1],color=colors[i])
i+=1
plt.xlabel('Players')
if datacol == 1:
plt.ylim((0,1.0))
plt.yticks(np.arange(0.0,1.01,0.1))
plt.ylabel('Human wins')
plt.axhline(y=0.5,linestyle='--',color='black')
elif datacol == 2:
plt.ylabel('Game length [turns]')
plt.yticks(np.arange(data[0,datacol],data[-1,datacol]+1,1))
plt.xticks(np.arange(data[0,0],data[-1,0]+1,1))
plt.title(caption)
plt.legend(loc='best')
plt.savefig(outfile)
plt.show()
|
isc
|
gclenaghan/scikit-learn
|
benchmarks/bench_plot_approximate_neighbors.py
|
244
|
6011
|
"""
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries are measured for various
hyper-parameters and index sizes.
Second, speed up of LSHForest queries compared to brute force
method in exact nearest neighbors is measures for the
aforementioned settings. In general, speed up is increasing as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
|
bsd-3-clause
|
chuajiesheng/twitter-sentiment-analysis
|
analysis/svm_sgd.py
|
1
|
3362
|
from sklearn.feature_extraction.text import *
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import *
from sklearn.metrics import *
from tokenizers import *
import numpy as np
from pprint import pprint
from time import time
# import dataset
def get_dataset():
files = ['./analysis/input/negative_tweets.txt', './analysis/input/neutral_tweets.txt', './analysis/input/positive_tweets.txt']
x = []
for file in files:
s = []
with open(file, 'r') as f:
for line in f:
s.append(line.strip())
assert len(s) == 1367
x.extend(s)
y = np.array([-1] * 1367 + [0] * 1367 + [1] * 1367)
return x, y
tweets, target = get_dataset()
# split train/test 90/10
X_train, X_test, y_train, y_test = train_test_split(tweets, target, test_size=0.1, random_state=1)
print('Train: \t{},{}'.format(len(X_train), y_train.shape))
print('Test: \t{},{}'.format(len(X_test), y_test.shape))
pipeline = Pipeline([('vect', CountVectorizer(ngram_range=(1, 3))),
('tfidf', TfidfTransformer(norm='l2', use_idf=True)),
('clf', SGDClassifier(loss='squared_loss', penalty='l2', alpha=1e-04, n_iter=50, random_state=42))])
pipeline = pipeline.fit(X_train, y_train)
# predict
predicted = pipeline.predict(X_test)
print('Accuracy: \t\t{}'.format(accuracy_score(y_test, predicted)))
print('Macro F1: \t\t{}'.format(f1_score(y_test, predicted, average='macro')))
X_ones = np.array(X_test)[y_test == 1]
predicted_positive = pipeline.predict(X_ones)
print('Positive accuracy: \t{}'.format(np.mean(predicted_positive == 1)))
X_ones = np.array(X_test)[y_test == -1]
predicted_negative = pipeline.predict(X_ones)
print('Negative accuracy: \t{}'.format(np.mean(predicted_negative == -1)))
# metrics
predicted = pipeline.predict(X_test)
print(classification_report(y_test, predicted))
print('Confusion matrix: \n{}'.format(confusion_matrix(y_test, predicted)))
# grid search
parameters = {
# 'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 2), (1, 3)), # unigrams or bigrams
# 'vect__tokenizer': (SkipgramTokenizer(3, 2), SkipgramTokenizer(2, 2), None),
# 'tfidf__use_idf': (True, False),
# 'tfidf__norm': ('l1', 'l2'),
'clf__loss': ('squared_loss', 'hinge', 'log', 'epsilon_insensitive'),
'clf__alpha': (0.0001, 0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
'clf__n_iter': (50, 80),
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=8, cv=ShuffleSplit(n_splits=10, test_size=0.2, random_state=10), verbose=1, scoring='accuracy')
print('Performing grid search...')
print('pipeline: {}'.format([name for name, _ in pipeline.steps]))
print('parameters:')
pprint(parameters)
t0 = time()
grid_search.fit(tweets, target)
print("Done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
# Best score: 0.627
# Best parameters set:
# clf__alpha: 0.0001
# clf__loss: 'squared_loss'
# clf__n_iter: 50
# clf__penalty: 'elasticnet'
# vect__ngram_range: (1, 3)
|
apache-2.0
|
plaes/numpy
|
numpy/doc/creation.py
|
3
|
5425
|
"""
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros, etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or record arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most obvious
examples are lists and tuples. See the documentation for array() for details for
its use. Some objects may support the array-protocol and allow conversion to arrays
this way. A simple way to find out if the object can be converted to a numpy array
using array() is simply to try it interactively and see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
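For example, evaluating f(x, y) = x + 2*y on a small grid: ::
>>> i, j = np.indices((3, 3))
>>> i + 2*j
array([[0, 2, 4], [1, 3, 5], [2, 4, 6]])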
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Others? xxx
Examples of formats that cannot be read directly but for which it is not hard
to convert are libraries like PIL (able to read and write many image formats
such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
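As a small illustration (assuming a comma-delimited text file 'data.txt' exists),
numpy itself can also read such files directly: ::
>>> arr = np.loadtxt('data.txt', delimiter=',')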
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques (see
xxx) though that certainly is much more work and requires significantly more
advanced knowledge to interface with C or C++.
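As a minimal sketch of such a round trip with an explicit dtype: ::
>>> a = np.arange(4, dtype=np.int32)
>>> a.tofile('data.bin')
>>> np.fromfile('data.bin', dtype=np.int32)
array([0, 1, 2, 3], dtype=int32)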
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal)
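For example: ::
>>> np.random.random((2, 2)) # random values in [0, 1); output varies
>>> np.diag([1, 2, 3])
array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])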
"""
|
bsd-3-clause
|
tiamat-studios/pandascore-python
|
tests/base_test.py
|
1
|
1334
|
import os
import unittest
DEFAULT_PER_PAGE = 50
class BaseTest(unittest.TestCase):
def setUp(self):
self.base_url = "https://api.pandascore.co/"
self.access_token = "notrealtoken"
def load_from_file(self, json_file):
cwd = os.path.dirname(__file__)
with open(os.path.join(cwd, 'data/%s' % json_file), 'r') as f:
return f.read()
def split_url(self, url):
bits = url.split('?')
if len(bits) == 1:
return url, []
qlist = bits[1].split('&')
qlist.sort()
return bits[0], qlist
def assert_url_query_equal(self, url1, url2):
""" Test if two URL queries are equal
The key=value pairs after the ? in a URL can occur in any order
(especially since dicts in python 3 are not deterministic across runs).
The method sorts the key=value pairs and then compares the URLs.
"""
base1, qlist1 = self.split_url(url1)
base2, qlist2 = self.split_url(url2)
self.assertEqual(base1, base2)
self.assertEqual(qlist1, qlist2)
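# For instance (with a made-up endpoint), these two URLs compare as equal because
# only the ordering of the query parameters differs:
#   assert_url_query_equal("https://api.pandascore.co/games?page=2&per_page=50",
#                          "https://api.pandascore.co/games?per_page=50&page=2")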
def assert_get_url_equal(self, url1, url2):
if "?" in url2:
url2 += "&"
else:
url2 += "?"
url2 += "per_page=%d" % DEFAULT_PER_PAGE
return self.assert_url_query_equal(url1, url2)
|
mit
|
nelson-liu/scikit-learn
|
sklearn/tests/test_learning_curve.py
|
45
|
11897
|
# Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockEstimatorFailing(BaseEstimator):
"""Dummy classifier to test error_score in learning curve"""
def fit(self, X_subset, y_subset):
raise ValueError()
def score(self, X=None, y=None):
return None
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_error_score():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockEstimatorFailing()
_, _, test_scores = learning_curve(estimator, X, y, cv=3, error_score=0)
all_zeros = not np.any(test_scores)
assert(all_zeros)
def test_learning_curve_error_score_default_raise():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockEstimatorFailing()
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
|
bsd-3-clause
|
CCI-Tools/cate-core
|
tests/ops/test_arithmetics.py
|
2
|
15123
|
"""
Tests for arithmetic operations
"""
from datetime import datetime
from unittest import TestCase
import numpy as np
import pandas as pd
import xarray as xr
from cate.core.op import OP_REGISTRY
from cate.ops import arithmetics
from cate.util.misc import object_to_qualified_name
def assert_dataset_equal(expected, actual):
# this method is functionally equivalent to
# `assert expected == actual`, but it checks each aspect
# of equality separately for easier debugging
assert expected.equals(actual), (expected, actual)
class TestDsArithmetics(TestCase):
"""
Test dataset arithmetic operations
"""
def test_nominal(self):
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
expected = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
actual = arithmetics.ds_arithmetics(dataset, '+2, -2, *3, /3, *4')
assert_dataset_equal(expected * 4, actual)
actual = arithmetics.ds_arithmetics(dataset,
'exp, log, *10, log10, *2, log2, +1.5')
assert_dataset_equal(expected * 2.5, actual)
actual = arithmetics.ds_arithmetics(dataset, 'exp, -1, log1p, +3')
assert_dataset_equal(expected * 4, actual)
with self.assertRaises(ValueError) as err:
arithmetics.ds_arithmetics(dataset, 'not')
self.assertTrue('not implemented' in str(err.exception))
def test_registered(self):
"""
Test the operation when invoked through the OP_REGISTRY
"""
reg_op = OP_REGISTRY.get_op(object_to_qualified_name(arithmetics.ds_arithmetics))
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
expected = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
actual = reg_op(ds=dataset, op='+2, -2, *3, /3, *4')
assert_dataset_equal(expected * 4, actual)
class TestDiff(TestCase):
"""
Test taking the difference between two datasets
"""
def test_diff(self):
# Test nominal
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
expected = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
actual = arithmetics.diff(dataset, dataset * 2)
assert_dataset_equal(expected * -1, actual)
# Test variable mismatch
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'third': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
ds1 = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
expected = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
actual = arithmetics.diff(ds, ds1)
assert_dataset_equal(expected, actual)
actual = arithmetics.diff(ds1, ds)
assert_dataset_equal(expected, actual)
# Test date range mismatch
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2000, x, 1) for x in range(1, 13)]})
ds1 = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2003, x, 1) for x in range(1, 13)]})
expected = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 12])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
actual = arithmetics.diff(ds, ds1)
assert_dataset_equal(actual, expected)
actual = arithmetics.diff(ds, ds1.drop('time'))
expected['time'] = [datetime(2000, x, 1) for x in range(1, 13)]
assert_dataset_equal(actual, expected)
# Test broadcasting
ds = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
ds1 = xr.Dataset({
'first': (['lat', 'lon'], np.ones([45, 90])),
'second': (['lat', 'lon'], np.ones([45, 90])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
expected = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.zeros([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.zeros([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
actual = arithmetics.diff(ds, ds1)
assert_dataset_equal(expected, actual)
ds['time'] = [datetime(2000, x, 1) for x in range(1, 4)]
expected['time'] = [datetime(2000, x, 1) for x in range(1, 4)]
actual = arithmetics.diff(ds, ds1)
assert_dataset_equal(expected, actual)
ds1 = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 1])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 1])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90),
'time': [datetime(2001, 1, 1)]})
actual = arithmetics.diff(ds, ds1)
assert_dataset_equal(expected, actual)
ds1 = ds1.squeeze('time')
ds1['time'] = 1
actual = arithmetics.diff(ds, ds1)
assert_dataset_equal(expected, actual)
def test_registered(self):
"""
Test the operation when invoked from the OP_REGISTRY
"""
reg_op = OP_REGISTRY.get_op(object_to_qualified_name(arithmetics.diff))
dataset = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
expected = xr.Dataset({
'first': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'second': (['lat', 'lon', 'time'], np.ones([45, 90, 3])),
'lat': np.linspace(-88, 88, 45),
'lon': np.linspace(-178, 178, 90)})
actual = reg_op(ds=dataset, ds2=dataset * 2)
assert_dataset_equal(expected * -1, actual)
class ComputeDatasetTest(TestCase):
def test_plain_compute(self):
da1 = np.ones([45, 90, 3])
da2 = np.ones([45, 90, 3])
lon = np.linspace(-178, 178, 90)
lat = np.linspace(-88, 88, 45)
ds1 = xr.Dataset({
'da1': (['lat', 'lon', 'time'], da1),
'da2': (['lat', 'lon', 'time'], da2),
'lat': lat,
'lon': lon
})
ds2 = arithmetics.compute_dataset(ds=ds1,
script="_x = 0.5 * da2\n"
"x1 = 2 * da1 - 3 * _x\n"
"x2 = 3 * da1 + 4 * _x\n")
self.assertIsInstance(ds2, xr.Dataset)
self.assertIn('lon', ds2)
self.assertIn('lat', ds2)
self.assertIn('x1', ds2)
self.assertIn('x2', ds2)
self.assertNotIn('da1', ds2)
self.assertNotIn('da2', ds2)
_x = 0.5 * da2
expected_x1 = 2 * da1 - 3 * _x
expected_x2 = 3 * da1 + 4 * _x
np.testing.assert_array_almost_equal(expected_x1, ds2['x1'].values)
np.testing.assert_array_almost_equal(expected_x2, ds2['x2'].values)
ds2 = arithmetics.compute_dataset(ds=ds1,
script="_x = 0.6 * da2\n"
"x1 = 4 * da1 - 4 * _x\n"
"x2 = 5 * da1 + 3 * _x\n",
copy=True)
self.assertIsInstance(ds2, xr.Dataset)
self.assertIn('lon', ds2)
self.assertIn('lat', ds2)
self.assertIn('x1', ds2)
self.assertIn('x2', ds2)
self.assertIn('da1', ds2)
self.assertIn('da2', ds2)
_x = 0.6 * da2
expected_x1 = 4 * da1 - 4 * _x
expected_x2 = 5 * da1 + 3 * _x
np.testing.assert_array_almost_equal(expected_x1, ds2['x1'].values)
np.testing.assert_array_almost_equal(expected_x2, ds2['x2'].values)
def test_plain_compute_with_context(self):
first = np.ones([45, 90, 3])
second = np.ones([45, 90, 3])
lon = np.linspace(-178, 178, 90)
lat = np.linspace(-88, 88, 45)
res_1 = xr.Dataset({
'first': (['lat', 'lon', 'time'], first),
'lat': lat,
'lon': lon
})
res_2 = xr.Dataset({
'second': (['lat', 'lon', 'time'], second),
'lat': lat,
'lon': lon
})
# Note, if executed from a workflow, _ctx will be set by the framework
_ctx = dict(value_cache=dict(res_1=res_1, res_2=res_2))
actual = arithmetics.compute_dataset(ds=None,
script="third = 6 * res_1.first - 3 * res_2.second",
_ctx=_ctx)
self.assertIsInstance(actual, xr.Dataset)
expected = xr.Dataset({
'third': (['lat', 'lon', 'time'], 6 * first - 3 * second),
'lat': lat,
'lon': lon})
assert_dataset_equal(expected, actual)
class ComputeDataFrameTest(TestCase):
def test_compute_simple(self):
s1 = 10. * np.linspace(0, 1, 11)
s2 = -2 * np.linspace(0, 1, 11)
s3 = +2 * np.linspace(0, 1, 11)
df1 = pd.DataFrame(dict(s1=s1, s2=s2, s3=s3))
df2 = arithmetics.compute_data_frame(df=df1,
script="_a = 3 * s2 - 4 * s3\n"
"a1 = 1 + 2 * s1 + _a\n"
"a2 = 2 + 3 * s1 + _a\n")
self.assertIsInstance(df2, pd.DataFrame)
self.assertEqual(11, len(df2))
self.assertIn('a1', df2)
self.assertIn('a2', df2)
self.assertNotIn('_a', df2)
self.assertNotIn('s1', df2)
self.assertNotIn('s2', df2)
self.assertNotIn('s3', df2)
expected_a = 3 * s2 - 4 * s3
expected_a1 = 1 + 2 * s1 + expected_a
expected_a2 = 2 + 3 * s1 + expected_a
np.testing.assert_array_almost_equal(expected_a1, df2['a1'].values)
np.testing.assert_array_almost_equal(expected_a2, df2['a2'].values)
df2 = arithmetics.compute_data_frame(df=df1,
script="_a = 3 * s2 - 4 * s3\n"
"a1 = 1 + 2 * s1 + _a\n"
"a2 = 2 + 3 * s1 + _a\n",
copy=True)
self.assertIsInstance(df2, pd.DataFrame)
self.assertEqual(11, len(df2))
self.assertIn('a1', df2)
self.assertIn('a2', df2)
self.assertNotIn('_a', df2)
self.assertIn('s1', df2)
self.assertIn('s2', df2)
self.assertIn('s3', df2)
expected_a = 3 * s2 - 4 * s3
expected_a1 = 1 + 2 * s1 + expected_a
expected_a2 = 2 + 3 * s1 + expected_a
np.testing.assert_array_almost_equal(expected_a1, df2['a1'].values)
np.testing.assert_array_almost_equal(expected_a2, df2['a2'].values)
def test_compute_aggregations(self):
s1 = 10. * np.linspace(0, 1, 11)
s2 = -2 * np.linspace(0, 1, 11)
s3 = +2 * np.linspace(0, 1, 11)
df1 = pd.DataFrame(dict(s1=s1, s2=s2, s3=s3))
df2 = arithmetics.compute_data_frame(df=df1,
script="s1_mean = s1.mean()\n"
"s2_sum = s2.sum()\n"
"s3_median = s3.median()\n")
self.assertIsInstance(df2, pd.DataFrame)
self.assertEqual(1, len(df2))
self.assertIn('s1_mean', df2)
self.assertIn('s2_sum', df2)
self.assertIn('s3_median', df2)
self.assertNotIn('s1', df2)
self.assertNotIn('s2', df2)
self.assertNotIn('s3', df2)
np.testing.assert_almost_equal(np.mean(s1), df2['s1_mean'].values)
np.testing.assert_almost_equal(np.sum(s2), df2['s2_sum'].values)
np.testing.assert_almost_equal(np.median(s3), df2['s3_median'].values)
df2 = arithmetics.compute_data_frame(df=df1,
script="s1_mean = s1.mean()\n"
"s2_sum = s2.sum()\n"
"s3_median = s3.median()\n",
copy=True)
self.assertIsInstance(df2, pd.DataFrame)
self.assertEqual(11, len(df2))
self.assertIn('s1_mean', df2)
self.assertIn('s2_sum', df2)
self.assertIn('s3_median', df2)
self.assertIn('s1', df2)
self.assertIn('s2', df2)
self.assertIn('s3', df2)
np.testing.assert_almost_equal(np.mean(s1), df2['s1_mean'].values)
np.testing.assert_almost_equal(np.sum(s2), df2['s2_sum'].values)
np.testing.assert_almost_equal(np.median(s3), df2['s3_median'].values)
|
mit
|
zurwolf/dotfiles
|
home/.ipython/profile_base16-railscasts-dark/ipython_notebook_config.py
|
1
|
24710
|
# Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = 'localhost'
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = True
# The notebook manager class to use.
# c.NotebookApp.notebook_manager_class = 'IPython.html.services.notebooks.filenbmanager.FileNotebookManager'
# The date format used by logging formatters for %(asctime)s
# c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = u'/home/zubieta/Documents/Notebooks'
#
# c.NotebookApp.file_to_run = ''
# The IPython profile to use.
# c.NotebookApp.profile = u'default'
# paths for Javascript extensions. By default, this is just
# IPYTHONDIR/nbextensions
# c.NotebookApp.nbextensions_path = []
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = u''
# Set the log level by value or name.
# c.NotebookApp.log_level = 30
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.NotebookApp.extra_config_file = u''
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.webapp_settings = {}
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
c.NotebookApp.browser = u'luakit %s'
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
c.IPKernelApp.pylab = u'inline'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
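# For example (illustrative only), to pre-import a few common libraries in
# every kernel:
# c.IPKernelApp.exec_lines = ['import numpy as np',
# 'import matplotlib.pyplot as plt']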
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
c.IPKernelApp.extensions = ['base16_mplrc']
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a cache that is
# too small than actually working.
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
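# For example (a sketch only, assuming the stock IPython kernel of this
# version), a custom command usually keeps the {connection_file} placeholder:
# c.KernelManager.kernel_cmd = ['python', '-m', 'IPython.kernel',
# '-f', '{connection_file}']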
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = u''
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'zubieta'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The figure format to enable (deprecated use `figure_formats` instead)
# c.InlineBackend.figure_format = u''
# A set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
# c.InlineBackend.figure_formats = set(['png'])
# Extra kwargs to be passed to fig.canvas.print_figure.
#
# Logical examples include: bbox_inches, quality (for jpeg figures), etc.
# c.InlineBackend.print_figure_kwargs = {'bbox_inches': 'tight'}
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': (1, 1, 1, 0), 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': (1, 1, 1, 0)}
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
#
# c.MappingKernelManager.root_dir = u'/home/zubieta/.ipython'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# NotebookManager configuration
#------------------------------------------------------------------------------
# Glob patterns to hide in file and directory listings.
# c.NotebookManager.hide_globs = [u'__pycache__']
#------------------------------------------------------------------------------
# FileNotebookManager configuration
#------------------------------------------------------------------------------
# FileNotebookManager will inherit config from: NotebookManager
# The directory name in which to keep notebook checkpoints
#
# This is a path relative to the notebook's own directory.
#
# By default, it is .ipynb_checkpoints
# c.FileNotebookManager.checkpoint_dir = '.ipynb_checkpoints'
# Glob patterns to hide in file and directory listings.
# c.FileNotebookManager.hide_globs = [u'__pycache__']
# Automatically create a Python script when saving the notebook.
#
# For easier use of import, %run and %load across notebooks, a <notebook-
# name>.py script will be created next to any <notebook-name>.ipynb on each
# save. This can also be set with the short `--script` flag.
# c.FileNotebookManager.save_script = False
#
c.FileNotebookManager.notebook_dir = u'/home/zubieta/Documents/Notebooks'
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = ''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = u''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
|
mit
|
rseubert/scikit-learn
|
examples/svm/plot_svm_scale_c.py
|
26
|
5353
|
"""
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1}^{n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function depends on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `L1` penalty, as well as the `L2` penalty.
L1-penalty case
-----------------
In the `L1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `L1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
L2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grows.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `L1` penalty case, the cross-validation error correlates best with
the test error when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `L2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is that the `L1` case works better on sparse data, while `L2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# L1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# L2 data: non-sparse, but fewer features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
clf_sets = [(LinearSVC(penalty='L1', loss='L2', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='L2', loss='L2', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get a nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
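# The second entry rescales the candidate C values by the number of training
# samples actually used in each split (train_size * n_samples); the first
# entry keeps them unscaled for comparison.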
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
trankmichael/scikit-learn
|
examples/cluster/plot_lena_segmentation.py
|
271
|
2444
|
"""
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi partition.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
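# The added eps keeps every edge weight strictly positive (the exponential can
# underflow to zero for large gradients), which keeps the similarity graph
# numerically well behaved for the spectral solver.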
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
|
bsd-3-clause
|
MatthieuBizien/scikit-learn
|
examples/datasets/plot_iris_dataset.py
|
35
|
1929
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray.
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The plot below uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions,
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
|
bsd-3-clause
|
ycool/apollo
|
modules/tools/mapshow/libs/subplot_traj_path.py
|
3
|
2931
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import matplotlib.pyplot as plt
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
class TrajPathSubplot:
def __init__(self, ax):
self.ax = ax
self.path_lines = []
self.path_lines_size = 30
self.colors = []
self.init_colors()
# self.colors = ['b','r', 'y', 'k']
for i in range(self.path_lines_size):
line, = ax.plot(
[0], [0],
c=self.colors[i % len(self.colors)],
ls="-",
marker='',
lw=8,
alpha=0.3)
self.path_lines.append(line)
ax.set_xlabel("x (m)")
# ax.set_xlim([-2, 10])
# ax.set_ylim([-6, 6])
self.ax.autoscale_view()
# self.ax.relim()
# ax.set_ylabel("y (m)")
ax.set_title("PLANNING ACC")
self.set_visible(False)
def init_colors(self):
self.colors = []
values = range(self.path_lines_size)
jet = plt.get_cmap('brg')
color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
for val in values:
color_val = scalar_map.to_rgba(val)
self.colors.append(color_val)
def set_visible(self, visible):
for line in self.path_lines:
line.set_visible(visible)
def show(self, planning):
planning.traj_data_lock.acquire()
for i in range(len(planning.traj_path_x_history)):
if i >= self.path_lines_size:
print "WARNING: number of path lines is more than " \
+ str(self.path_lines_size)
continue
speed_line = self.path_lines[self.path_lines_size - i - 1]
speed_line.set_xdata(planning.traj_path_x_history[i])
speed_line.set_ydata(planning.traj_path_y_history[i])
speed_line.set_visible(True)
# self.ax.legend(loc="upper left", borderaxespad=0., ncol=5)
# self.ax.axis('equal')
planning.traj_data_lock.release()
self.ax.autoscale_view()
self.ax.relim()
|
apache-2.0
|
signed/intellij-community
|
python/helpers/pydev/pydev_ipython/matplotlibtools.py
|
10
|
5496
|
import sys
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
backend2gui['Qt4Agg'] = 'qt4'
backend2gui['Qt5Agg'] = 'qt5'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
def do_enable_gui(guiname):
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
import traceback
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend
def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive()
def patch_use(enable_gui_function):
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_use(*args, **kwargs):
matplotlib.real_use(*args, **kwargs)
gui, backend = find_gui_and_backend()
enable_gui_function(gui)
setattr(matplotlib, "real_use", getattr(matplotlib, "use"))
setattr(matplotlib, "use", patched_use)
def patch_is_interactive():
""" Patch matplotlib function 'use' """
matplotlib = sys.modules['matplotlib']
def patched_is_interactive():
return matplotlib.rcParams['interactive']
setattr(matplotlib, "real_is_interactive", getattr(matplotlib, "is_interactive"))
setattr(matplotlib, "is_interactive", patched_is_interactive)
def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive()
def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args,**kw):
wrapper.called = False
out = func(*args,**kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper
def activate_pylab():
pylab = sys.modules['pylab']
pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def activate_pyplot():
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
|
apache-2.0
|
gfyoung/pandas
|
pandas/tests/base/test_constructors.py
|
3
|
5119
|
from datetime import datetime
import sys
import numpy as np
import pytest
from pandas.compat import PYPY
import pandas as pd
from pandas import DataFrame, Index, Series
import pandas._testing as tm
from pandas.core.accessor import PandasDelegate
from pandas.core.base import NoNewAttributesMixin, PandasObject
@pytest.fixture(
params=[
Series,
lambda x, **kwargs: DataFrame({"a": x}, **kwargs)["a"],
lambda x, **kwargs: DataFrame(x, **kwargs)[0],
Index,
],
ids=["Series", "DataFrame-dict", "DataFrame-array", "Index"],
)
def constructor(request):
return request.param
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
msg = "You cannot access the property foo"
with pytest.raises(TypeError, match=msg):
delegate.foo
msg = "The property foo cannot be set"
with pytest.raises(TypeError, match=msg):
delegate.foo = 5
msg = "You cannot access the property foo"
with pytest.raises(TypeError, match=msg):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class TestNoNewAttributesMixin:
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
msg = "You cannot add any new attribute"
with pytest.raises(AttributeError, match=msg):
t.b = "test"
assert not hasattr(t, "b")
class TestConstruction:
# test certain constructor behaviours on dtype inference across Series,
# Index and DataFrame
@pytest.mark.parametrize(
"klass",
[
Series,
lambda x, **kwargs: DataFrame({"a": x}, **kwargs)["a"],
pytest.param(
lambda x, **kwargs: DataFrame(x, **kwargs)[0], marks=pytest.mark.xfail
),
Index,
],
)
@pytest.mark.parametrize(
"a",
[
np.array(["2263-01-01"], dtype="datetime64[D]"),
np.array([datetime(2263, 1, 1)], dtype=object),
np.array([np.datetime64("2263-01-01", "D")], dtype=object),
np.array(["2263-01-01"], dtype=object),
],
ids=[
"datetime64[D]",
"object-datetime.datetime",
"object-numpy-scalar",
"object-string",
],
)
def test_constructor_datetime_outofbound(self, a, klass):
# GH-26853 (+ bug GH-26206 out of bound non-ns unit)
# No dtype specified (dtype inference)
# datetime64[non-ns] raise error, other cases result in object dtype
# and preserve original data
if a.dtype.kind == "M":
msg = "Out of bounds"
with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg):
klass(a)
else:
result = klass(a)
assert result.dtype == "object"
tm.assert_numpy_array_equal(result.to_numpy(), a)
# Explicit dtype specified
# Forced conversion fails for all -> all cases raise error
msg = "Out of bounds"
with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg):
klass(a, dtype="datetime64[ns]")
def test_constructor_datetime_nonns(self, constructor):
arr = np.array(["2020-01-01T00:00:00.000000"], dtype="datetime64[us]")
expected = constructor(pd.to_datetime(["2020-01-01"]))
result = constructor(arr)
tm.assert_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/34843
arr.flags.writeable = False
result = constructor(arr)
tm.assert_equal(result, expected)
|
bsd-3-clause
|
HolgerPeters/scikit-learn
|
examples/cluster/plot_adjusted_for_chance_measures.py
|
105
|
4300
|
"""
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each possible
value in ``n_clusters_range``.
When fixed_n_classes is not None, the first labeling is considered a ground
truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
|
bsd-3-clause
|
vinhqdang/my_mooc
|
MOOC-work/coursera/FINISHED/Computational Investing/Part I/Examples/Basic/tutorial1.py
|
5
|
3657
|
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on January, 24, 2013
@author: Sourabh Bajaj
@contact: [email protected]
@summary: Example tutorial code.
'''
# QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
print "Pandas Version", pd.__version__
def main():
''' Main Function'''
# List of symbols
ls_symbols = ["AAPL", "GLD", "GOOG", "$SPX", "XOM"]
# Start and End date of the charts
dt_start = dt.datetime(2006, 1, 1)
dt_end = dt.datetime(2010, 12, 31)
# We need closing prices so the timestamp should be hours=16.
dt_timeofday = dt.timedelta(hours=16)
# Get a list of trading days between the start and the end.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
# Creating an object of the dataaccess class with Yahoo as the source.
c_dataobj = da.DataAccess('Yahoo')
# Keys to be read from the data; it is good to read everything in one go.
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
# Reading the data, now d_data is a dictionary with the keys above.
# Timestamps and symbols are the ones that were specified before.
ldf_data = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
# Filling the data for NAN
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method='ffill')
d_data[s_key] = d_data[s_key].fillna(method='bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
# Getting the numpy ndarray of close prices.
na_price = d_data['close'].values
# Plotting the prices with x-axis=timestamps
plt.clf()
plt.plot(ldt_timestamps, na_price)
plt.legend(ls_symbols)
plt.ylabel('Adjusted Close')
plt.xlabel('Date')
plt.savefig('adjustedclose.pdf', format='pdf')
# Normalizing the prices to start at 1 and see relative returns
na_normalized_price = na_price / na_price[0, :]
# Plotting the prices with x-axis=timestamps
plt.clf()
plt.plot(ldt_timestamps, na_normalized_price)
plt.legend(ls_symbols)
plt.ylabel('Normalized Close')
plt.xlabel('Date')
plt.savefig('normalized.pdf', format='pdf')
# Copy the normalized prices to a new ndarray to find returns.
na_rets = na_normalized_price.copy()
# Calculate the daily returns of the prices. (Inplace calculation)
# returnize0 works on ndarray and not dataframes.
tsu.returnize0(na_rets)
# Plotting the plot of daily returns
plt.clf()
plt.plot(ldt_timestamps[0:50], na_rets[0:50, 3]) # $SPX 50 days
plt.plot(ldt_timestamps[0:50], na_rets[0:50, 4]) # XOM 50 days
plt.axhline(y=0, color='r')
plt.legend(['$SPX', 'XOM'])
plt.ylabel('Daily Returns')
plt.xlabel('Date')
plt.savefig('rets.pdf', format='pdf')
# Plotting the scatter plot of daily returns between XOM VS $SPX
plt.clf()
plt.scatter(na_rets[:, 3], na_rets[:, 4], c='blue')
plt.ylabel('XOM')
plt.xlabel('$SPX')
plt.savefig('scatterSPXvXOM.pdf', format='pdf')
# Plotting the scatter plot of daily returns between $SPX VS GLD
plt.clf()
plt.scatter(na_rets[:, 3], na_rets[:, 1], c='blue') # $SPX v GLD
plt.ylabel('GLD')
plt.xlabel('$SPX')
plt.savefig('scatterSPXvGLD.pdf', format='pdf')
if __name__ == '__main__':
main()
|
mit
|